summaryrefslogtreecommitdiffstats
path: root/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-14 20:03:01 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-14 20:03:01 +0000
commita453ac31f3428614cceb99027f8efbdb9258a40b (patch)
treef61f87408f32a8511cbd91799f9cececb53e0374 /collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins
parentInitial commit. (diff)
downloadansible-upstream.tar.xz
ansible-upstream.zip
Adding upstream version 2.10.7+merged+base+2.10.8+dfsg.upstream/2.10.7+merged+base+2.10.8+dfsgupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins')
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/doc_fragments/netapp.py228
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/doc_fragments/santricity.py90
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_host.py105
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_host_detail.py102
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_lun_mapping.py141
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_storage_pool.py81
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_volume.py91
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/module_utils/netapp.py746
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/module_utils/santricity.py431
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts.py253
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts_syslog.py176
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_asup.py521
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auditlog.py200
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auth.py341
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_client_certificate.py232
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_discover.py313
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_drive_firmware.py209
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_facts.py918
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_firmware.py613
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_global.py367
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_host.py512
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_hostgroup.py279
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ib_iser_interface.py250
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_interface.py342
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_target.py246
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ldap.py391
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_lun_mapping.py247
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_mgmt_interface.py531
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_nvme_interface.py305
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_drive_firmware_upload.py150
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_firmware_upload.py149
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_systems.py579
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_storagepool.py926
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_syslog.py248
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_volume.py896
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_alerts.py286
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg.py268
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_role.py244
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_sync.py267
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_asup.py314
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auditlog.py286
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auth.py283
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_drive_firmware.py215
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_facts.py530
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_firmware.py488
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_flashcache.py442
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_global.py159
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_host.py544
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_hostgroup.py307
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_interface.py407
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_target.py297
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_ldap.py401
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_lun_mapping.py291
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_mgmt_interface.py723
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_group.py376
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_images.py257
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_volume.py289
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storage_system.py310
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storagepool.py941
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_syslog.py286
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume.py868
-rw-r--r--collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume_copy.py431
62 files changed, 22719 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/doc_fragments/netapp.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/doc_fragments/netapp.py
new file mode 100644
index 00000000..b3943fb3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/doc_fragments/netapp.py
@@ -0,0 +1,228 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Sumit Kumar <sumit4@netapp.com>, chris Archibald <carchi@netapp.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+ - Ansible modules are available for the following NetApp Storage Platforms: E-Series, ONTAP, SolidFire
+'''
+
+ # Documentation fragment for Cloud Volume Services on Azure NetApp (azure_rm_netapp)
+ AZURE_RM_NETAPP = r'''
+options:
+ resource_group:
+ description:
+ - Name of the resource group.
+ required: true
+ type: str
+requirements:
+ - python >= 2.7
+ - azure >= 2.0.0
+ - Python netapp-mgmt. Install using 'pip install netapp-mgmt'
+ - Python netapp-mgmt-netapp. Install using 'pip install netapp-mgmt-netapp'
+ - For authentication with Azure NetApp log in before you run your tasks or playbook with C(az login).
+
+notes:
+ - The modules prefixed with azure_rm_netapp are built to support the Cloud Volume Services for Azure NetApp Files.
+
+seealso:
+ - name: Sign in with Azure CLI
+ link: https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest
+ description: How to authenticate using the C(az login) command.
+ '''
+
+ # Documentation fragment for ONTAP (na_ontap)
+ NA_ONTAP = r'''
+options:
+ hostname:
+ description:
+ - The hostname or IP address of the ONTAP instance.
+ type: str
+ required: true
+ username:
+ description:
+ - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
+ For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
+ type: str
+ required: true
+ aliases: [ user ]
+ password:
+ description:
+ - Password for the specified user.
+ type: str
+ required: true
+ aliases: [ pass ]
+ https:
+ description:
+ - Enable and disable https
+ type: bool
+ default: no
+ validate_certs:
+ description:
+ - If set to C(no), the SSL certificates will not be validated.
+ - This should only set to C(False) used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: yes
+ http_port:
+ description:
+ - Override the default port (80 or 443) with this port
+ type: int
+ ontapi:
+ description:
+ - The ontap api version to use
+ type: int
+ use_rest:
+ description:
+ - REST API if supported by the target system for all the resources and attributes the module requires. Otherwise will revert to ZAPI.
+ - Always -- will always use the REST API
+ - Never -- will always use the ZAPI
+ - Auto -- will try to use the REST Api
+ default: Auto
+ choices: ['Never', 'Always', 'Auto']
+ type: str
+
+
+requirements:
+ - A physical or virtual clustered Data ONTAP system. The modules support Data ONTAP 9.1 and onward
+ - Ansible 2.6
+ - Python2 netapp-lib (2017.10.30) or later. Install using 'pip install netapp-lib'
+ - Python3 netapp-lib (2018.11.13) or later. Install using 'pip install netapp-lib'
+ - To enable http on the cluster you must run the following commands 'set -privilege advanced;' 'system services web modify -http-enabled true;'
+
+notes:
+ - The modules prefixed with na\\_ontap are built to support the ONTAP storage platform.
+
+'''
+
+ # Documentation fragment for ONTAP (na_cdot)
+ ONTAP = r'''
+options:
+ hostname:
+ required: true
+ description:
+ - The hostname or IP address of the ONTAP instance.
+ username:
+ required: true
+ description:
+ - This can be a Cluster-scoped or SVM-scoped account, depending on whether a Cluster-level or SVM-level API is required.
+ For more information, please read the documentation U(https://mysupport.netapp.com/NOW/download/software/nmsdk/9.4/).
+ aliases: ['user']
+ password:
+ required: true
+ description:
+ - Password for the specified user.
+ aliases: ['pass']
+requirements:
+ - A physical or virtual clustered Data ONTAP system. The modules were developed with Clustered Data ONTAP 8.3
+ - Ansible 2.2
+ - netapp-lib (2015.9.25). Install using 'pip install netapp-lib'
+
+notes:
+ - The modules prefixed with na\\_cdot are built to support the ONTAP storage platform.
+
+'''
+
+ # Documentation fragment for SolidFire
+ SOLIDFIRE = r'''
+options:
+ hostname:
+ required: true
+ description:
+ - The hostname or IP address of the SolidFire cluster.
+ username:
+ required: true
+ description:
+ - Please ensure that the user has the adequate permissions. For more information, please read the official documentation
+ U(https://mysupport.netapp.com/documentation/docweb/index.html?productID=62636&language=en-US).
+ aliases: ['user']
+ password:
+ required: true
+ description:
+ - Password for the specified user.
+ aliases: ['pass']
+
+requirements:
+ - The modules were developed with SolidFire 10.1
+ - solidfire-sdk-python (1.1.0.92) or greater. Install using 'pip install solidfire-sdk-python'
+
+notes:
+ - The modules prefixed with na\\_elementsw are built to support the SolidFire storage platform.
+
+'''
+
+ # Documentation fragment for E-Series
+ ESERIES = r'''
+options:
+ api_username:
+ required: true
+ type: str
+ description:
+ - The username to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
+ api_password:
+ required: true
+ type: str
+ description:
+ - The password to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
+ api_url:
+ required: true
+ type: str
+ description:
+ - The url to the SANtricity Web Services Proxy or Embedded Web Services API.
+ Example https://prod-1.wahoo.acme.com/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ ssid:
+ required: false
+ type: str
+ default: 1
+ description:
+ - The ID of the array to manage. This value must be unique for each array.
+
+notes:
+ - The E-Series Ansible modules require either an instance of the Web Services Proxy (WSP), to be available to manage
+ the storage-system, or an E-Series storage-system that supports the Embedded Web Services API.
+ - Embedded Web Services is currently available on the E2800, E5700, EF570, and newer hardware models.
+ - M(netapp_eseries.santricity.netapp_e_storage_system) may be utilized for configuring the systems managed by a WSP instance.
+'''
+
+ # Documentation fragment for AWSCVS
+ AWSCVS = """
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The access key to authenticate with the AWSCVS Web Services Proxy or Embedded Web Services API.
+ secret_key:
+ required: true
+ type: str
+ description:
+ - The secret_key to authenticate with the AWSCVS Web Services Proxy or Embedded Web Services API.
+ api_url:
+ required: true
+ type: str
+ description:
+ - The url to the AWSCVS Web Services Proxy or Embedded Web Services API.
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+notes:
+ - The modules prefixed with aws\\_cvs\\_netapp are built to Manage AWS Cloud Volume Service .
+"""
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/doc_fragments/santricity.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/doc_fragments/santricity.py
new file mode 100644
index 00000000..aa5cc5ea
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/doc_fragments/santricity.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r"""
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+ - Ansible modules are available for the following NetApp Storage Platforms: E-Series
+"""
+
+ # Documentation fragment for E-Series
+ SANTRICITY_PROXY_DOC = r"""
+options:
+ api_username:
+ required: true
+ type: str
+ description:
+ - The username to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
+ api_password:
+ required: true
+ type: str
+ description:
+ - The password to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
+ api_url:
+ required: true
+ type: str
+ description:
+ - The url to the SANtricity Web Services Proxy or Embedded Web Services API.
+ - Example https://prod-1.wahoo.acme.com:8443/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+
+notes:
+ - The E-Series Ansible modules require either an instance of the Web Services Proxy (WSP), to be available to manage
+ the storage-system, or an E-Series storage-system that supports the Embedded Web Services API.
+ - Embedded Web Services is currently available on the E2800, E5700, EF570, and newer hardware models.
+ - M(netapp_eseries.santricity.netapp_e_storage_system) may be utilized for configuring the systems managed by a WSP instance.
+"""
+
+ # Documentation fragment for E-Series
+ SANTRICITY_DOC = r"""
+options:
+ api_username:
+ required: true
+ type: str
+ description:
+ - The username to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
+ api_password:
+ required: true
+ type: str
+ description:
+ - The password to authenticate with the SANtricity Web Services Proxy or Embedded Web Services API.
+ api_url:
+ required: true
+ type: str
+ description:
+ - The url to the SANtricity Web Services Proxy or Embedded Web Services API.
+ - Example https://prod-1.wahoo.acme.com:8443/devmgr/v2
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ ssid:
+ required: false
+ type: str
+ default: 1
+ description:
+ - The ID of the array to manage. This value must be unique for each array.
+
+notes:
+ - The E-Series Ansible modules require either an instance of the Web Services Proxy (WSP), to be available to manage
+ the storage-system, or an E-Series storage-system that supports the Embedded Web Services API.
+ - Embedded Web Services is currently available on the E2800, E5700, EF570, and newer hardware models.
+ - M(netapp_eseries.santricity.netapp_e_storage_system) may be utilized for configuring the systems managed by a WSP instance.
+"""
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_host.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_host.py
new file mode 100644
index 00000000..b599b995
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_host.py
@@ -0,0 +1,105 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: santricity_hosts
+ author: Nathan Swartz
+ short_description: Collects host information
+ description:
+ - Collects current host, expected host and host group inventory definitions.
+ options:
+ inventory:
+ description:
+ - E-Series storage array inventory, hostvars[inventory_hostname].
+ - Run na_santricity_facts prior to calling
+ required: True
+ type: complex
+"""
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, inventory, **kwargs):
+ if isinstance(inventory, list):
+ inventory = inventory[0]
+
+ if ("eseries_storage_pool_configuration" not in inventory or not isinstance(inventory["eseries_storage_pool_configuration"], list) or
+ len(inventory["eseries_storage_pool_configuration"]) == 0):
+ return list()
+
+ if "eseries_storage_pool_configuration" not in inventory.keys():
+ raise AnsibleError("eseries_storage_pool_configuration must be defined. See nar_santricity_host role documentation.")
+
+ info = {"current_hosts": {}, "expected_hosts": {}, "host_groups": {}}
+
+ groups = []
+ hosts = []
+ non_inventory_hosts = []
+ non_inventory_groups = []
+ for group in inventory["groups"].keys():
+ groups.append(group)
+ hosts.extend(inventory["groups"][group])
+
+ if "eseries_host_object" in inventory.keys():
+ non_inventory_hosts = [host["name"] for host in inventory["eseries_host_object"]]
+ non_inventory_groups = [host["group"] for host in inventory["eseries_host_object"] if "group" in host]
+
+ # Determine expected hosts and host groups
+ for storage_pool in inventory["eseries_storage_pool_configuration"]:
+ if "volumes" in storage_pool:
+ for volume in storage_pool["volumes"]:
+
+ if (("state" in volume and volume["state"] == "present") or
+ ("eseries_volume_state" in inventory and inventory["eseries_volume_state"] == "present") or
+ ("state" not in volume and "eseries_volume_state" not in inventory)):
+ if "host" in volume:
+ if volume["host"] in groups:
+
+ if volume["host"] not in info["host_groups"].keys():
+
+ # Add all expected group hosts
+ for expected_host in inventory["groups"][volume["host"]]:
+ if "host_type" in volume:
+ info["expected_hosts"].update({expected_host: {"state": "present",
+ "host_type": volume["host_type"],
+ "group": volume["host"]}})
+ elif "common_volume_configuration" in storage_pool and "host_type" in storage_pool["common_volume_configuration"]:
+ info["expected_hosts"].update({expected_host: {"state": "present",
+ "host_type": storage_pool["common_volume_configuration"]["host_type"],
+ "group": volume["host"]}})
+ elif "eseries_system_default_host_type" in inventory:
+ info["expected_hosts"].update({expected_host: {"state": "present",
+ "host_type": inventory["eseries_system_default_host_type"],
+ "group": volume["host"]}})
+ else:
+ info["expected_hosts"].update({expected_host: {"state": "present",
+ "group": volume["host"]}})
+
+ info["host_groups"].update({volume["host"]: inventory["groups"][volume["host"]]})
+
+ elif volume["host"] in hosts:
+ if "host_type" in volume:
+ info["expected_hosts"].update({volume["host"]: {"state": "present",
+ "host_type": volume["host_type"],
+ "group": None}})
+ elif "common_volume_configuration" in storage_pool and "host_type" in storage_pool["common_volume_configuration"]:
+ info["expected_hosts"].update({volume["host"]: {"state": "present",
+ "host_type": storage_pool["common_volume_configuration"]["host_type"],
+ "group": volume["host"]}})
+ elif "eseries_system_default_host_type" in inventory:
+ info["expected_hosts"].update({volume["host"]: {"state": "present",
+ "host_type": inventory["eseries_system_default_host_type"],
+ "group": volume["host"]}})
+ else:
+ info["expected_hosts"].update({volume["host"]: {"state": "present",
+ "group": None}})
+
+ elif volume["host"] not in non_inventory_hosts and volume["host"] not in non_inventory_groups:
+ raise AnsibleError("Expected host or host group does not exist in your Ansible inventory and is not specified in"
+ " eseries_host_object variable!")
+
+ return [info]
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_host_detail.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_host_detail.py
new file mode 100644
index 00000000..70bb699f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_host_detail.py
@@ -0,0 +1,102 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: santricity_hosts_detail
+ author: Nathan Swartz
+ short_description: Expands the host information from santricity_host lookup
+ description:
+ - Expands the host information from santricity_host lookup to include system and port information
+ options:
+ hosts:
+ description:
+ - E-Series storage array inventory, hostvars[inventory_hostname].
+ - Run na_santricity_facts prior to calling
+ required: True
+ type: list
+ hosts_info:
+ description:
+ - The registered results from the setup module from each expected_hosts, hosts_info['results'].
+ - Collected results from the setup module for each expected_hosts from the results of the santricity_host lookup plugin.
+ required: True
+ type: list
+ host_interface_ports:
+ description:
+ - List of dictionaries containing "stdout_lines" which is a list of iqn/wwpns for each expected_hosts from the results of
+ the santricity_host lookup plugin.
+ - Register the results from the shell module that is looped over each host in expected_hosts. The command issued should result
+ in a newline delineated list of iqns, nqns, or wwpns.
+ required: True
+ type: list
+ protocol:
+ description:
+ - Storage system interface protocol (iscsi, sas, fc, ib-iser, ib-srp, nvme_ib, or nvme_roce)
+ required: True
+ type: str
+
+"""
+import re
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+ def run(self, hosts, hosts_info, host_interface_ports, protocol, **kwargs):
+ if isinstance(hosts, list):
+ hosts = hosts[0]
+
+ if "expected_hosts" not in hosts:
+ raise AnsibleError("Invalid argument: hosts must contain the output from santricity_host lookup plugin.")
+ if not isinstance(hosts_info, list):
+ raise AnsibleError("Invalid argument: hosts_info must contain the results from the setup module for each"
+ " expected_hosts found in the output of the santricity_host lookup plugin.")
+ if not isinstance(host_interface_ports, list):
+ raise AnsibleError("Invalid argument: host_interface_ports must contain list of dictionaries containing 'stdout_lines' key"
+ " which is a list of iqns, nqns, or wwpns for each expected_hosts from the results of the santricity_host lookup plugin")
+ if protocol not in ["iscsi", "sas", "fc", "ib_iser", "ib_srp", "nvme_ib", "nvme_roce"]:
+ raise AnsibleError("Invalid argument: protocol must be a protocol from the following: iscsi, sas, fc, ib_iser, ib_srp, nvme_ib, nvme_roce.")
+
+ for host in hosts["expected_hosts"].keys():
+ sanitized_hostname = re.sub("[.:-]", "_", host)[:20]
+
+ # Add host information to expected host
+ for info in hosts_info:
+ if info["item"] == host:
+
+ # Determine host type
+ if "host_type" not in hosts["expected_hosts"][host].keys():
+ if info["ansible_facts"]["ansible_os_family"].lower() == "windows":
+ hosts["expected_hosts"][host]["host_type"] = "windows"
+ elif info["ansible_facts"]["ansible_os_family"].lower() in ["redhat", "debian", "suse"]:
+ hosts["expected_hosts"][host]["host_type"] = "linux dm-mp"
+
+ # Update hosts object
+ hosts["expected_hosts"][host].update({"sanitized_hostname": sanitized_hostname, "ports": []})
+
+ # Add SAS ports
+ for interface in host_interface_ports:
+ if interface["item"] == host and "stdout_lines" in interface.keys():
+ if protocol == "sas":
+ for index, address in enumerate([base[:-1] + str(index) for base in interface["stdout_lines"] for index in range(8)]):
+ label = "%s_%s" % (sanitized_hostname, index)
+ hosts["expected_hosts"][host]["ports"].append({"type": "sas", "label": label, "port": address})
+ elif protocol == "ib_iser" or protocol == "ib_srp":
+ for index, address in enumerate(interface["stdout_lines"]):
+ label = "%s_%s" % (sanitized_hostname, index)
+ hosts["expected_hosts"][host]["ports"].append({"type": "ib", "label": label, "port": address})
+ elif protocol == "nvme_ib":
+ for index, address in enumerate(interface["stdout_lines"]):
+ label = "%s_%s" % (sanitized_hostname, index)
+ hosts["expected_hosts"][host]["ports"].append({"type": "nvmeof", "label": label, "port": address})
+ elif protocol == "nvme_roce":
+ for index, address in enumerate(interface["stdout_lines"]):
+ label = "%s_%s" % (sanitized_hostname, index)
+ hosts["expected_hosts"][host]["ports"].append({"type": "nvmeof", "label": label, "port": address})
+ else:
+ for index, address in enumerate(interface["stdout_lines"]):
+ label = "%s_%s" % (sanitized_hostname, index)
+ hosts["expected_hosts"][host]["ports"].append({"type": protocol, "label": label, "port": address})
+
+ return [hosts]
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_lun_mapping.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_lun_mapping.py
new file mode 100644
index 00000000..6160d82c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_lun_mapping.py
@@ -0,0 +1,141 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleError
+
+
class LookupModule(LookupBase):
    """Determine volume-to-target LUN mappings for an E-Series storage system.

    Consumes the storage array facts (collected by the na_santricity_facts module)
    together with the expected volume definitions and returns a list of
    {"volume": <name>, "target": <host or host group>, "lun": <number>} entries.
    The in-memory netapp_luns_by_target table is updated as mappings are added,
    removed or changed so later volumes see a consistent picture.
    """

    def run(self, array_facts, volumes, **kwargs):
        """Build the list of volume/target/lun mappings.

        :param array_facts: storage array facts (or a one-element list of them).
        :param volumes: list of expected volume dictionaries (or a single dict).
        :raise AnsibleError: when a requested lun conflicts or the target is undefined.
        """
        if isinstance(array_facts, list):
            array_facts = array_facts[0]

        if isinstance(volumes, dict):  # This means that there is only one volume and volumes was stripped of its list
            volumes = [volumes]

        if "storage_array_facts" not in array_facts.keys():
            # Don't throw exceptions unless you want run to terminate!!!
            # raise AnsibleError("Storage array information not available. Collect facts using na_santricity_facts module.")
            return list()

        # Remove any absent volumes
        volumes = [vol for vol in volumes if "state" not in vol or vol["state"] == "present"]

        self.array_facts = array_facts["storage_array_facts"]
        self.luns_by_target = self.array_facts["netapp_luns_by_target"]
        self.access_volume_lun = self.array_facts["netapp_default_hostgroup_access_volume_lun"]

        # Search for volumes that have a specified host or host group initiator
        mapping_info = list()
        for volume in volumes:
            if "host" in volume.keys():

                # host initiator is already mapped on the storage system
                if volume["host"] in self.luns_by_target:

                    # Collect every lun already consumed by the target and, when the
                    # target belongs to (or is) a host group, by its related hosts.
                    used_luns = [lun for name, lun in self.luns_by_target[volume["host"]]]
                    for host_group in self.array_facts["netapp_host_groups"]:
                        if volume["host"] == host_group["name"]:  # target is an existing host group
                            for host in host_group["hosts"]:
                                used_luns.extend([lun for name, lun in self.luns_by_target[host]])
                            break
                        elif volume["host"] in host_group["hosts"]:  # target is an existing host in the host group.
                            used_luns.extend([lun for name, lun in self.luns_by_target[host_group["name"]]])
                            break

                    for name, lun in self.luns_by_target[volume["host"]]:

                        # Check whether volume is mapped to the expected host
                        if name == volume["name"]:

                            # Check whether lun option differs from existing lun
                            if "lun" in volume and volume["lun"] != lun:
                                self.change_volume_mapping_lun(volume["name"], volume["host"], volume["lun"])
                                lun = volume["lun"]

                                if lun in used_luns:
                                    raise AnsibleError("Volume [%s] cannot be mapped to host or host group [%s] using lun number %s!"
                                                       % (name, volume["host"], lun))

                            mapping_info.append({"volume": volume["name"], "target": volume["host"], "lun": lun})
                            break

                    # Volume has not been mapped to host initiator
                    else:

                        # Check whether lun option has been used
                        if "lun" in volume:
                            if volume["lun"] in used_luns:
                                for target in self.array_facts["netapp_luns_by_target"].keys():
                                    for mapped_volume, mapped_lun in [entry for entry in self.array_facts["netapp_luns_by_target"][target] if entry]:
                                        if volume["lun"] == mapped_lun:
                                            if volume["name"] != mapped_volume:
                                                raise AnsibleError("Volume [%s] cannot be mapped to host or host group [%s] using lun number %s!"
                                                                   % (volume["name"], volume["host"], volume["lun"]))
                                            else:  # volume is being remapped with the same lun number
                                                self.remove_volume_mapping(mapped_volume, target)
                            lun = volume["lun"]
                        else:
                            lun = self.next_available_lun(used_luns)

                        mapping_info.append({"volume": volume["name"], "target": volume["host"], "lun": lun})
                        self.add_volume_mapping(volume["name"], volume["host"], lun)

                else:
                    raise AnsibleError("The host or host group [%s] is not defined!" % volume["host"])

        return mapping_info

    def next_available_lun(self, used_luns):
        """Find next available lun number (skipping the access volume's lun)."""
        if self.access_volume_lun is not None:
            used_luns.append(self.access_volume_lun)

        lun = 1
        while lun in used_luns:
            lun += 1

        return lun

    def add_volume_mapping(self, name, host, lun):
        """Add volume mapping to record table (luns_by_target)."""
        # Find associated group and the groups hosts
        for host_group in self.array_facts["netapp_host_groups"]:

            if host == host_group["name"]:
                # add to group
                self.luns_by_target[host].append([name, lun])

                # add to hosts
                for hostgroup_host in host_group["hosts"]:
                    self.luns_by_target[hostgroup_host].append([name, lun])

                break
        else:
            self.luns_by_target[host].append([name, lun])

    def remove_volume_mapping(self, name, host):
        """Remove volume mapping from record table (luns_by_target).

        The previous implementation used ``del entry`` inside a for-loop, which
        only unbinds the loop variable and never removes the entry from the list;
        the mapping table was therefore never actually updated. Entries are now
        filtered out with in-place slice assignment.
        """
        for host_group in self.array_facts["netapp_host_groups"]:
            if host == host_group["name"]:
                # remove from group
                self.luns_by_target[host_group["name"]][:] = [entry for entry in self.luns_by_target[host_group["name"]] if entry[0] != name]
                # remove from hosts
                for hostgroup_host in host_group["hosts"]:
                    self.luns_by_target[hostgroup_host][:] = [entry for entry in self.luns_by_target[hostgroup_host] if entry[0] != name]
                break
        else:
            self.luns_by_target[host][:] = [entry for entry in self.luns_by_target[host] if entry[0] != name]

    def change_volume_mapping_lun(self, name, host, lun):
        """Change a volume mapping's lun in the record table (luns_by_target)."""
        self.remove_volume_mapping(name, host)
        self.add_volume_mapping(name, host, lun)
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_storage_pool.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_storage_pool.py
new file mode 100644
index 00000000..6a7b4d87
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_storage_pool.py
@@ -0,0 +1,81 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ lookup: santricity_sp_config
+ author: Nathan Swartz
+ short_description: Storage pool information
+ description:
+ - Retrieves storage pool information from the inventory
+"""
+import re
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleError
+from itertools import product
+
+
class LookupModule(LookupBase):
    """Expand eseries_storage_pool_configuration into per-pool option dictionaries."""

    def run(self, inventory, state, **kwargs):
        """Return the storage pools from the inventory whose state matches *state*.

        :param inventory: inventory variables (or a one-element list of them).
        :param str state: desired state ("present"/"absent") used to filter pools.
        :raise AnsibleError: when a configuration entry is malformed.
        """
        if isinstance(inventory, list):
            inventory = inventory[0]

        pools = inventory["eseries_storage_pool_configuration"] if "eseries_storage_pool_configuration" in inventory else None
        if not isinstance(pools, list) or not pools:
            return []

        # Global state overrides, in priority order.
        if "eseries_remove_all_configuration_state" in inventory:
            default_state = inventory["eseries_remove_all_configuration_state"]
        elif "eseries_storage_pool_state" in inventory:
            default_state = inventory["eseries_storage_pool_state"]
        else:
            default_state = "present"

        matching_pools = []
        for pool_info in pools:
            if not isinstance(pool_info, dict) or "name" not in pool_info:
                raise AnsibleError("eseries_storage_pool_configuration must contain a list of dictionaries containing the necessary information.")

            # A single entry may describe several pools via a name pattern.
            for pool_name in patternize(pool_info["name"], inventory):
                candidate = {"state": default_state}
                candidate.update(pool_info)
                candidate["name"] = pool_name

                if candidate["state"] == state:
                    matching_pools.append(candidate)

        return matching_pools
+
+
def patternize(pattern, inventory, storage_pool=None):
    """Generate list of strings determined by a pattern.

    Expands three token forms, in order:
      1. "[pool]" is replaced with *storage_pool* when one is supplied.
      2. "[key]" is replaced with str(inventory["key"]) for any truthy *inventory*.
      3. Range tokens such as "[0-9]", "[8-12]", "[a-z]" or "[A-Z]" expand into the
         cross product of every value in each range. Numeric bounds may now have
         multiple digits; letter ranges remain single characters.

    :param str pattern: name pattern to expand.
    :param dict inventory: inventory variables used for "[key]" substitutions.
    :param str storage_pool: optional storage pool name substituted for "[pool]".
    :return list(str): every name produced by the pattern.
    """
    if storage_pool:
        pattern = pattern.replace("[pool]", storage_pool)

    if inventory:
        for token in re.findall(r"\[[a-zA-Z0-9_]*\]", pattern):
            pattern = pattern.replace(token, str(inventory[token[1:-1]]))

    # Numeric ranges may be multi-digit; letter ranges are single characters.
    range_regex = r"\[[0-9]+-[0-9]+\]|\[[a-z]-[a-z]\]|\[[A-Z]-[A-Z]\]"
    tokens = re.findall(range_regex, pattern)
    if not tokens:
        return [pattern]

    segments = re.split(range_regex, pattern)

    combinations = []
    for token in tokens:
        start, stop = token[1:-1].split("-")
        try:
            combinations.append([str(value) for value in range(int(start), int(stop) + 1)])
        except ValueError:
            combinations.append([chr(value) for value in range(ord(start), ord(stop) + 1)])

    # Interleave literal segments with each combination of range values. Explicit
    # joining (rather than the previous "%s"-format template) keeps literal "%"
    # characters in the pattern intact.
    expanded = []
    for subset in product(*combinations):
        parts = [segments[0]]
        for value, segment in zip(subset, segments[1:]):
            parts.append(value)
            parts.append(segment)
        expanded.append("".join(parts))
    return expanded
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_volume.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_volume.py
new file mode 100644
index 00000000..8f34d170
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/lookup/santricity_volume.py
@@ -0,0 +1,91 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleError
+from itertools import product
+
+
class LookupModule(LookupBase):
    """Expand eseries_storage_pool_configuration into a flat list of volume definitions."""

    def run(self, inventory, **kwargs):
        """Return a list of volume option dictionaries derived from the inventory.

        :param inventory: inventory variables (or a one-element list of them).
        :raise AnsibleError: when a storage pool's volume list is malformed.
        """
        if isinstance(inventory, list):
            inventory = inventory[0]

        if ("eseries_storage_pool_configuration" not in inventory.keys() or not isinstance(inventory["eseries_storage_pool_configuration"], list) or
                len(inventory["eseries_storage_pool_configuration"]) == 0):
            return list()

        vol_list = list()
        for sp_info in inventory["eseries_storage_pool_configuration"]:

            # Entries without volumes contribute nothing.
            if "name" not in sp_info.keys() or "volumes" not in sp_info.keys():
                continue

            if not isinstance(sp_info["volumes"], list):
                raise AnsibleError("Volumes must be a list")

            for sp in patternize(sp_info["name"], inventory):
                for vol_info in sp_info["volumes"]:

                    if not isinstance(vol_info, dict):
                        raise AnsibleError("Volume in the storage pool, %s, must be a dictionary." % sp_info["name"])

                    # Fail with a clear message instead of a raw KeyError when a volume has no name.
                    if "name" not in vol_info:
                        raise AnsibleError("Volume in the storage pool, %s, must have a name." % sp_info["name"])

                    for vol in patternize(vol_info["name"], inventory, storage_pool=sp):
                        vol_options = dict()

                        # Seed options from the pool's common_volume_configuration.
                        combined_volume_metadata = {}
                        if "common_volume_configuration" in sp_info:
                            for option, value in sp_info["common_volume_configuration"].items():
                                vol_options.update({option: value})
                            if "volume_metadata" in sp_info["common_volume_configuration"].keys():
                                combined_volume_metadata.update(sp_info["common_volume_configuration"]["volume_metadata"])

                        # Volume-specific options override the common ones; metadata dictionaries are merged.
                        for option, value in vol_info.items():
                            vol_options.update({option: value})
                        if "volume_metadata" in vol_info.keys():
                            combined_volume_metadata.update(vol_info["volume_metadata"])
                            vol_options.update({"volume_metadata": combined_volume_metadata})

                        # An absent storage pool implies all of its volumes are absent as well.
                        if "state" in sp_info and sp_info["state"] == "absent":
                            vol_options.update({"state": "absent"})

                        vol_options.update({"name": vol, "storage_pool_name": sp})
                        vol_list.append(vol_options)
        return vol_list
+
+
def patternize(pattern, inventory, storage_pool=None):
    """Generate list of strings determined by a pattern.

    Expands three token forms, in order:
      1. "[pool]" is replaced with *storage_pool* when one is supplied.
      2. "[key]" is replaced with str(inventory["key"]) for any truthy *inventory*.
      3. Range tokens such as "[0-9]", "[8-12]", "[a-z]" or "[A-Z]" expand into the
         cross product of every value in each range. Numeric bounds may now have
         multiple digits; letter ranges remain single characters.

    :param str pattern: name pattern to expand.
    :param dict inventory: inventory variables used for "[key]" substitutions.
    :param str storage_pool: optional storage pool name substituted for "[pool]".
    :return list(str): every name produced by the pattern.
    """
    if storage_pool:
        pattern = pattern.replace("[pool]", storage_pool)

    if inventory:
        for token in re.findall(r"\[[a-zA-Z0-9_]*\]", pattern):
            pattern = pattern.replace(token, str(inventory[token[1:-1]]))

    # Numeric ranges may be multi-digit; letter ranges are single characters.
    range_regex = r"\[[0-9]+-[0-9]+\]|\[[a-z]-[a-z]\]|\[[A-Z]-[A-Z]\]"
    tokens = re.findall(range_regex, pattern)
    if not tokens:
        return [pattern]

    segments = re.split(range_regex, pattern)

    combinations = []
    for token in tokens:
        start, stop = token[1:-1].split("-")
        try:
            combinations.append([str(value) for value in range(int(start), int(stop) + 1)])
        except ValueError:
            combinations.append([chr(value) for value in range(ord(start), ord(stop) + 1)])

    # Interleave literal segments with each combination of range values. Explicit
    # joining (rather than the previous "%s"-format template) keeps literal "%"
    # characters in the pattern intact.
    expanded = []
    for subset in product(*combinations):
        parts = [segments[0]]
        for value, segment in zip(subset, segments[1:]):
            parts.append(value)
            parts.append(segment)
        expanded.append("".join(parts))
    return expanded
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/module_utils/netapp.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/module_utils/netapp.py
new file mode 100644
index 00000000..b87e6595
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/module_utils/netapp.py
@@ -0,0 +1,746 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2017, Sumit Kumar <sumit4@netapp.com>
+# Copyright (c) 2017, Michael Price <michael.price@netapp.com>
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import os
+import random
+import mimetypes
+
+from pprint import pformat
+from ansible.module_utils import six
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils._text import to_native
+
+try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+except ImportError:
+ ansible_version = 'unknown'
+
+try:
+ from netapp_lib.api.zapi import zapi
+ HAS_NETAPP_LIB = True
+except ImportError:
+ HAS_NETAPP_LIB = False
+
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+import ssl
+try:
+ from urlparse import urlparse, urlunparse
+except ImportError:
+ from urllib.parse import urlparse, urlunparse
+
+
# Assume the SolidFire SDK is unavailable until the import attempt below succeeds.
HAS_SF_SDK = False
# Decimal (SI) byte-unit multipliers used for SolidFire sizing options.
SF_BYTE_MAP = dict(
    # Management GUI displays 1024 ** 3 as 1.1 GB, thus use 1000.
    bytes=1,
    b=1,
    kb=1000,
    mb=1000 ** 2,
    gb=1000 ** 3,
    tb=1000 ** 4,
    pb=1000 ** 5,
    eb=1000 ** 6,
    zb=1000 ** 7,
    yb=1000 ** 8
)

# Binary (power-of-two) byte-unit multipliers.
POW2_BYTE_MAP = dict(
    # Here, 1 kb = 1024
    bytes=1,
    b=1,
    kb=1024,
    mb=1024 ** 2,
    gb=1024 ** 3,
    tb=1024 ** 4,
    pb=1024 ** 5,
    eb=1024 ** 6,
    zb=1024 ** 7,
    yb=1024 ** 8
)
+
+try:
+ from solidfire.factory import ElementFactory
+ from solidfire.custom.models import TimeIntervalFrequency
+ from solidfire.models import Schedule, ScheduleInfo
+
+ HAS_SF_SDK = True
+except Exception:
+ HAS_SF_SDK = False
+
+
def has_netapp_lib():
    """Return True when the optional netapp_lib package imported successfully."""
    return HAS_NETAPP_LIB
+
+
def has_sf_sdk():
    """Return True when the optional SolidFire SDK imported successfully."""
    return HAS_SF_SDK
+
+
def na_ontap_host_argument_spec():
    """Return the base AnsibleModule argument specification shared by ONTAP modules."""
    spec = {
        'hostname': {'required': True, 'type': 'str'},
        'username': {'required': True, 'type': 'str', 'aliases': ['user']},
        'password': {'required': True, 'type': 'str', 'aliases': ['pass'], 'no_log': True},
        'https': {'required': False, 'type': 'bool', 'default': False},
        'validate_certs': {'required': False, 'type': 'bool', 'default': True},
        'http_port': {'required': False, 'type': 'int'},
        'ontapi': {'required': False, 'type': 'int'},
        'use_rest': {'required': False, 'type': 'str', 'default': 'Auto', 'choices': ['Never', 'Always', 'Auto']},
    }
    return spec
+
+
def ontap_sf_host_argument_spec():
    """Return the minimal credential argument specification for ONTAP/SolidFire modules."""
    required_str = {'required': True, 'type': 'str'}
    return {
        'hostname': dict(required_str),
        'username': dict(required_str, aliases=['user']),
        'password': dict(required_str, aliases=['pass'], no_log=True),
    }
+
+
def aws_cvs_host_argument_spec():
    """Return the argument specification for AWS Cloud Volumes Service modules."""
    return {
        'api_url': {'required': True, 'type': 'str'},
        'validate_certs': {'required': False, 'type': 'bool', 'default': True},
        'api_key': {'required': True, 'type': 'str'},
        'secret_key': {'required': True, 'type': 'str'},
    }
+
+
def create_sf_connection(module, port=None):
    """Create a SolidFire Element OS connection from the module's credentials.

    :param module: AnsibleModule providing hostname/username/password parameters.
    :param port: optional TCP port forwarded to ElementFactory.create.
    :return: connection object returned by ElementFactory.create.
    :raise Exception: when the SolidFire SDK fails to establish the connection.
    """
    hostname = module.params['hostname']
    username = module.params['username']
    password = module.params['password']

    if HAS_SF_SDK and hostname and username and password:
        try:
            return_val = ElementFactory.create(hostname, username, password, port=port)
            return return_val
        except Exception:
            raise Exception("Unable to create SF connection")
    else:
        # fail_json exits the module; reached when the SDK or any credential is missing.
        module.fail_json(msg="the python SolidFire SDK module is required")
+
+
def setup_na_ontap_zapi(module, vserver=None):
    """Build a netapp_lib NaServer (ZAPI) connection from the module's parameters.

    Honors the https/validate_certs/http_port/ontapi module parameters; defaults
    to HTTP on port 80 (HTTPS on port 443 when requested) and ONTAPI minor
    version 110 when none is supplied.

    :param module: AnsibleModule supplying the connection parameters.
    :param vserver: optional vserver name to tunnel requests to.
    :return: configured zapi.NaServer object.
    """
    hostname = module.params['hostname']
    username = module.params['username']
    password = module.params['password']
    https = module.params['https']
    validate_certs = module.params['validate_certs']
    port = module.params['http_port']
    version = module.params['ontapi']

    if HAS_NETAPP_LIB:
        # set up zapi
        server = zapi.NaServer(hostname)
        server.set_username(username)
        server.set_password(password)
        if vserver:
            server.set_vserver(vserver)
        if version:
            minor = version
        else:
            minor = 110
        server.set_api_version(major=1, minor=minor)
        # default is HTTP
        if https:
            if port is None:
                port = 443
            transport_type = 'HTTPS'
            # HACK to bypass certificate verification
            # NOTE(review): this disables verification process-wide, not just for this
            # connection — every subsequent HTTPS request in the process is affected.
            if validate_certs is False:
                if not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None):
                    ssl._create_default_https_context = ssl._create_unverified_context
        else:
            if port is None:
                port = 80
            transport_type = 'HTTP'
        server.set_transport_type(transport_type)
        server.set_port(port)
        server.set_server_type('FILER')
        return server
    else:
        module.fail_json(msg="the python NetApp-Lib module is required")
+
+
def setup_ontap_zapi(module, vserver=None):
    """Build a netapp_lib NaServer (ZAPI) connection using plain-HTTP defaults.

    Unlike setup_na_ontap_zapi, this variant hard-codes HTTP on port 80 and
    ONTAPI version 1.110.

    :param module: AnsibleModule supplying hostname, username and password.
    :param vserver: optional vserver name to tunnel requests to.
    :return: configured zapi.NaServer object.
    """
    hostname = module.params['hostname']
    username = module.params['username']
    password = module.params['password']

    if HAS_NETAPP_LIB:
        # set up zapi
        server = zapi.NaServer(hostname)
        server.set_username(username)
        server.set_password(password)
        if vserver:
            server.set_vserver(vserver)
        # Todo : Replace hard-coded values with configurable parameters.
        server.set_api_version(major=1, minor=110)
        server.set_port(80)
        server.set_server_type('FILER')
        server.set_transport_type('HTTP')
        return server
    else:
        module.fail_json(msg="the python NetApp-Lib module is required")
+
+
def eseries_host_argument_spec():
    """Retrieve a base argument specification common to all NetApp E-Series modules."""
    eseries_options = {
        "api_username": {"type": "str", "required": True},
        "api_password": {"type": "str", "required": True, "no_log": True},
        "api_url": {"type": "str", "required": True},
        "ssid": {"type": "str", "required": False, "default": "1"},
        "validate_certs": {"type": "bool", "required": False, "default": True},
    }
    spec = basic_auth_argument_spec()
    spec.update(eseries_options)
    return spec
+
+
class NetAppESeriesModule(object):
    """Base class for all NetApp E-Series modules.

    Provides a set of common methods for NetApp E-Series modules, including version checking, mode (proxy, embedded)
    verification, http requests, secure http redirection for embedded web services, and logging setup.

    Be sure to add the following lines in the module's documentation section:
    extends_documentation_fragment:
        - netapp.eseries

    :param dict(dict) ansible_options: dictionary of ansible option definitions
    :param str web_services_version: minimally required web services rest api version (default value: "02.00.0000.0000")
    :param bool supports_check_mode: whether the module will support the check_mode capabilities (default=False)
    :param list(list) mutually_exclusive: list containing list(s) of mutually exclusive options (optional)
    :param list(list) required_if: list containing list(s) containing the option, the option value, and then
    a list of required options. (optional)
    :param list(list) required_one_of: list containing list(s) of options for which at least one is required. (optional)
    :param list(list) required_together: list containing list(s) of options that are required together. (optional)
    :param bool log_requests: controls whether to log each request (default: True)
    """
    DEFAULT_TIMEOUT = 60
    DEFAULT_SECURE_PORT = "8443"
    DEFAULT_REST_API_PATH = "devmgr/v2/"
    DEFAULT_REST_API_ABOUT_PATH = "devmgr/utils/about"
    DEFAULT_HEADERS = {"Content-Type": "application/json", "Accept": "application/json",
                       "netapp-client-type": "Ansible-%s" % ansible_version}
    HTTP_AGENT = "Ansible / %s" % ansible_version
    SIZE_UNIT_MAP = dict(bytes=1, b=1, kb=1024, mb=1024**2, gb=1024**3, tb=1024**4,
                         pb=1024**5, eb=1024**6, zb=1024**7, yb=1024**8)

    def __init__(self, ansible_options, web_services_version=None, supports_check_mode=False,
                 mutually_exclusive=None, required_if=None, required_one_of=None, required_together=None,
                 log_requests=True):
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(ansible_options)

        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode,
                                    mutually_exclusive=mutually_exclusive, required_if=required_if,
                                    required_one_of=required_one_of, required_together=required_together)

        args = self.module.params
        self.web_services_version = web_services_version if web_services_version else "02.00.0000.0000"
        self.ssid = args["ssid"]
        self.url = args["api_url"]
        self.log_requests = log_requests
        self.creds = dict(url_username=args["api_username"],
                          url_password=args["api_password"],
                          validate_certs=args["validate_certs"])

        if not self.url.endswith("/"):
            self.url += "/"

        # Cached so embedded/proxy detection and version validation run only once.
        self.is_embedded_mode = None
        self.is_web_services_valid_cache = None

    def _check_web_services_version(self):
        """Verify proxy or embedded web services meets minimum version required for module.

        The minimum required web services version is evaluated against version supplied through the web services rest
        api. AnsibleFailJson exception will be raised when the minimum is not met or exceeded.

        This helper function will update the supplied api url if secure http is not used for embedded web services

        :raise AnsibleFailJson: raised when the contacted api service does not meet the minimum required version.
        """
        if not self.is_web_services_valid_cache:

            url_parts = urlparse(self.url)
            if not url_parts.scheme or not url_parts.netloc:
                self.module.fail_json(msg="Failed to provide valid API URL. Example: https://192.168.1.100:8443/devmgr/v2. URL [%s]." % self.url)

            if url_parts.scheme not in ["http", "https"]:
                self.module.fail_json(msg="Protocol must be http or https. URL [%s]." % self.url)

            self.url = "%s://%s/" % (url_parts.scheme, url_parts.netloc)
            about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
            rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, ignore_errors=True, **self.creds)

            if rc != 200:
                # Fall back to the default secure port before giving up.
                self.module.warn("Failed to retrieve web services about information! Retrying with secure ports. Array Id [%s]." % self.ssid)
                self.url = "https://%s:8443/" % url_parts.netloc.split(":")[0]
                about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
                try:
                    rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
                except Exception as error:
                    self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
                                              % (self.ssid, to_native(error)))

            # Compare version fields numerically when possible. The previous string
            # comparison ordered "10" before "9", mis-reporting newer releases as
            # too old. Fall back to strings only when a field is not numeric.
            try:
                major, minor, other, revision = [int(part) for part in data["version"].split(".")]
                minimum_major, minimum_minor, other, minimum_revision = [int(part) for part in self.web_services_version.split(".")]
            except ValueError:
                major, minor, other, revision = data["version"].split(".")
                minimum_major, minimum_minor, other, minimum_revision = self.web_services_version.split(".")

            if not (major > minimum_major or
                    (major == minimum_major and minor > minimum_minor) or
                    (major == minimum_major and minor == minimum_minor and revision >= minimum_revision)):
                self.module.fail_json(msg="Web services version does not meet minimum version required. Current version: [%s]."
                                          " Version required: [%s]." % (data["version"], self.web_services_version))

            self.module.log("Web services rest api version met the minimum required version.")
            self.is_web_services_valid_cache = True

    def is_embedded(self):
        """Determine whether web services server is the embedded web services.

        If web services about endpoint fails based on an URLError then the request will be attempted again using
        secure http.

        :raise AnsibleFailJson: raised when web services about endpoint failed to be contacted.
        :return bool: whether contacted web services is running from storage array (embedded) or from a proxy.
        """
        self._check_web_services_version()

        if self.is_embedded_mode is None:
            about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
            try:
                rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
                self.is_embedded_mode = not data["runningAsProxy"]
            except Exception as error:
                self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
                                          % (self.ssid, to_native(error)))

        return self.is_embedded_mode

    def request(self, path, data=None, method='GET', headers=None, ignore_errors=False):
        """Issue an HTTP request to a url, retrieving an optional JSON response.

        :param str path: web services rest api endpoint path (Example: storage-systems/1/graph). Note that when the
        full url path is specified then that will be used without supplying the protocol, hostname, port and rest path.
        :param data: data required for the request (data may be json or any python structured data)
        :param str method: request method such as GET, POST, DELETE.
        :param dict headers: dictionary containing request headers.
        :param bool ignore_errors: forces the request to ignore any raised exceptions.
        """
        self._check_web_services_version()

        if headers is None:
            headers = self.DEFAULT_HEADERS

        if not isinstance(data, str) and headers["Content-Type"] == "application/json":
            data = json.dumps(data)

        if path.startswith("/"):
            path = path[1:]
        request_url = self.url + self.DEFAULT_REST_API_PATH + path

        # Honor the log_requests flag. The previous "or True" made the condition
        # unconditionally true, so every request was logged regardless of the flag.
        if self.log_requests:
            self.module.log(pformat(dict(url=request_url, data=data, method=method)))

        return request(url=request_url, data=data, method=method, headers=headers, use_proxy=True, force=False, last_mod_time=None,
                       timeout=self.DEFAULT_TIMEOUT, http_agent=self.HTTP_AGENT, force_basic_auth=True, ignore_errors=ignore_errors, **self.creds)
+
+
def create_multipart_formdata(files, fields=None, send_8kb=False):
    """Create the data for a multipart/form request.

    :param list(list) files: list of lists each containing (name, filename, path).
    :param list(list) fields: list of lists each containing (key, value).
    :param bool send_8kb: only sends the first 8kb of the files (default: False).
    :return tuple(dict, data): request headers (content type with boundary plus
        content length) and the encoded multipart body.
    """
    # 27 random digits make a boundary collision with the payload vanishingly unlikely.
    boundary = "---------------------------" + "".join([str(random.randint(0, 9)) for x in range(27)])
    data_parts = list()
    data = None

    if six.PY2:  # Generate payload for Python 2
        newline = "\r\n"
        if fields is not None:
            for key, value in fields:
                data_parts.extend(["--%s" % boundary,
                                   'Content-Disposition: form-data; name="%s"' % key,
                                   "",
                                   value])

        for name, filename, path in files:
            with open(path, "rb") as fh:
                value = fh.read(8192) if send_8kb else fh.read()

                data_parts.extend(["--%s" % boundary,
                                   'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
                                   "Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream"),
                                   "",
                                   value])
        data_parts.extend(["--%s--" % boundary, ""])
        data = newline.join(data_parts)

    else:
        # Python 3: all parts must be bytes before joining.
        newline = six.b("\r\n")
        if fields is not None:
            # NOTE(review): assumes field values are text — six.b() would fail on
            # bytes values. Confirm against callers before relying on binary fields.
            for key, value in fields:
                data_parts.extend([six.b("--%s" % boundary),
                                   six.b('Content-Disposition: form-data; name="%s"' % key),
                                   six.b(""),
                                   six.b(value)])

        for name, filename, path in files:
            with open(path, "rb") as fh:
                # File content is already bytes; it is appended without re-encoding.
                value = fh.read(8192) if send_8kb else fh.read()

                data_parts.extend([six.b("--%s" % boundary),
                                   six.b('Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename)),
                                   six.b("Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream")),
                                   six.b(""),
                                   value])
        data_parts.extend([six.b("--%s--" % boundary), b""])
        data = newline.join(data_parts)

    headers = {
        "Content-Type": "multipart/form-data; boundary=%s" % boundary,
        "Content-Length": str(len(data))}

    return headers, data
+
+
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP request to a url, retrieving an optional JSON response.

    :param str url: fully qualified url to request.
    :param data: request body, already serialized.
    :param dict headers: request headers; defaults to JSON content/accept headers.
    :param str method: HTTP method such as GET, POST or DELETE.
    :param bool ignore_errors: when True, status codes >= 400 and undecodable
        bodies are returned to the caller instead of raising.
    :return tuple(int, object): response status code and decoded JSON body.
    :raise Exception: on HTTP status >= 400 (unless ignore_errors) or when the
        response body cannot be decoded as JSON.
    """
    if headers is None:
        headers = {"Content-Type": "application/json", "Accept": "application/json"}
    # NOTE(review): mutates a caller-supplied headers dict in place.
    headers.update({"netapp-client-type": "Ansible-%s" % ansible_version})

    if not http_agent:
        http_agent = "Ansible / %s" % ansible_version

    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError as err:
        # HTTP errors still carry a readable response body.
        r = err.fp

    # Initialize before the try so the except clause cannot hit a NameError
    # when r.read() itself raises (which previously masked the real failure).
    raw_data = None
    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except Exception:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data
+
+
def ems_log_event(source, server, name="Ansible", id="12345", version=ansible_version,
                  category="Information", event="setup", autosupport="false"):
    """Send an EMS autosupport log event through the given ZAPI *server* connection.

    :param str source: name of the application invoking the API.
    :param server: connected netapp_lib NaServer instance; errors raised by
        invoke_successfully propagate to the caller.
    """
    ems_log = zapi.NaElement('ems-autosupport-log')
    # Host name invoking the API.
    ems_log.add_new_child("computer-name", name)
    # ID of event. A user defined event-id, range [0..2^32-2].
    ems_log.add_new_child("event-id", id)
    # Name of the application invoking the API.
    ems_log.add_new_child("event-source", source)
    # Version of application invoking the API.
    ems_log.add_new_child("app-version", version)
    # Application defined category of the event.
    ems_log.add_new_child("category", category)
    # Description of event to log. An application defined message to log.
    ems_log.add_new_child("event-description", event)
    ems_log.add_new_child("log-level", "6")
    ems_log.add_new_child("auto-support", autosupport)
    server.invoke_successfully(ems_log, True)
+
+
def get_cserver_zapi(server):
    """Return the cluster admin vserver name using a ZAPI vserver-get-iter query.

    :param server: connected netapp_lib NaServer instance.
    :return str: vserver-name of the first vserver whose vserver-type is 'admin'.
    """
    vserver_info = zapi.NaElement('vserver-get-iter')
    # Restrict the iterator to admin-type vservers only.
    query_details = zapi.NaElement.create_node_with_children('vserver-info', **{'vserver-type': 'admin'})
    query = zapi.NaElement('query')
    query.add_child_elem(query_details)
    vserver_info.add_child_elem(query)
    result = server.invoke_successfully(vserver_info,
                                        enable_tunneling=False)
    attribute_list = result.get_child_by_name('attributes-list')
    vserver_list = attribute_list.get_child_by_name('vserver-info')
    return vserver_list.get_child_content('vserver-name')
+
+
def get_cserver(connection, is_rest=False):
    """Return the name of the cluster admin vserver, or None when it cannot be determined.

    :param connection: ZAPI NaServer (is_rest=False) or REST connection object
        exposing get(api, params) -> (json, error) (is_rest=True).
    :param bool is_rest: selects the REST code path.
    :return str or None: admin vserver name; falls back to the sole vserver when
        exactly one record exists; None on error or when nothing matches.
    """
    if not is_rest:
        return get_cserver_zapi(connection)

    params = {'fields': 'type'}
    api = "private/cli/vserver"
    # Renamed from "json" to avoid shadowing the module-level json import.
    response, error = connection.get(api, params)
    if response is None or error is not None:
        # exit if there is an error or no data
        return None
    vservers = response.get('records')
    if vservers is not None:
        for vserver in vservers:
            if vserver['type'] == 'admin':  # cluster admin
                return vserver['vserver']
        if len(vservers) == 1:  # assume vserver admin
            return vservers[0]['vserver']

    return None
+
+
+class OntapRestAPI(object):
+    """Thin wrapper around the ONTAP REST API built on the ``requests`` library.
+
+    Errors are accumulated in ``self.errors`` and ``self.debug_logs`` and also
+    surfaced to callers through the ``error`` element of each method's return
+    tuple; methods do not raise on HTTP-level failures.
+    """
+
+    def __init__(self, module, timeout=60):
+        # Connection details come straight from the Ansible module parameters.
+        self.module = module
+        self.username = self.module.params['username']
+        self.password = self.module.params['password']
+        self.hostname = self.module.params['hostname']
+        self.use_rest = self.module.params['use_rest']
+        self.verify = self.module.params['validate_certs']
+        self.timeout = timeout
+        self.url = 'https://' + self.hostname + '/api/'
+        self.errors = list()
+        self.debug_logs = list()
+        # Fail fast (fail_json) if 'requests' is not installed.
+        self.check_required_library()
+
+    def check_required_library(self):
+        # Abort the module run when the 'requests' package is missing.
+        if not HAS_REQUESTS:
+            self.module.fail_json(msg=missing_required_lib('requests'))
+
+    def send_request(self, method, api, params, json=None, return_status_code=False):
+        ''' send http request and process response, including error conditions
+
+        Returns (status_code, error) when return_status_code is True,
+        otherwise (json_dict, error). ``error`` is None on success.
+        NOTE(review): the ``json`` parameter (request payload) shadows the json
+        module name inside this method; the module itself is not used here.
+        '''
+        url = self.url + api
+        status_code = None
+        content = None
+        json_dict = None
+        json_error = None
+        error_details = None
+
+        def get_json(response):
+            ''' extract json, and error message if present '''
+            try:
+                json = response.json()
+            except ValueError:
+                return None, None
+            error = json.get('error')
+            return json, error
+
+        try:
+            response = requests.request(method, url, verify=self.verify, auth=(self.username, self.password), params=params, timeout=self.timeout, json=json)
+            content = response.content  # for debug purposes
+            status_code = response.status_code
+            # If the response was successful, no Exception will be raised
+            response.raise_for_status()
+            json_dict, json_error = get_json(response)
+        except requests.exceptions.HTTPError as err:
+            # The server responded with an error status; prefer the error
+            # message embedded in the JSON payload when one is present.
+            __, json_error = get_json(response)
+            if json_error is None:
+                self.log_error(status_code, 'HTTP error: %s' % err)
+                error_details = str(err)
+            # If an error was reported in the json payload, it is handled below
+        except requests.exceptions.ConnectionError as err:
+            self.log_error(status_code, 'Connection error: %s' % err)
+            error_details = str(err)
+        except Exception as err:
+            self.log_error(status_code, 'Other error: %s' % err)
+            error_details = str(err)
+        if json_error is not None:
+            self.log_error(status_code, 'Endpoint error: %d: %s' % (status_code, json_error))
+            error_details = json_error
+        # Every request, success or failure, is recorded for debugging.
+        self.log_debug(status_code, content)
+        if return_status_code:
+            return status_code, error_details
+        return json_dict, error_details
+
+    def get(self, api, params):
+        method = 'GET'
+        return self.send_request(method, api, params)
+
+    def post(self, api, data, params=None):
+        method = 'POST'
+        return self.send_request(method, api, params, json=data)
+
+    def patch(self, api, data, params=None):
+        method = 'PATCH'
+        return self.send_request(method, api, params, json=data)
+
+    def delete(self, api, data, params=None):
+        method = 'DELETE'
+        return self.send_request(method, api, params, json=data)
+
+    def _is_rest(self, used_unsupported_rest_properties=None):
+        # Decide whether to use REST, honoring the 'use_rest' module option:
+        # 'Always' forces REST (with an error if unsupported properties are in
+        # play), 'Never' forces ZAPI, and 'Auto' (any other value) probes the
+        # cluster with a cheap HEAD request.
+        if self.use_rest == "Always":
+            if used_unsupported_rest_properties:
+                error = "REST API currently does not support '%s'" % \
+                        ', '.join(used_unsupported_rest_properties)
+                return True, error
+            else:
+                return True, None
+        if self.use_rest == 'Never' or used_unsupported_rest_properties:
+            # force ZAPI if requested or if some parameter requires it
+            return False, None
+        method = 'HEAD'
+        api = 'cluster/software'
+        status_code, __ = self.send_request(method, api, params=None, return_status_code=True)
+        if status_code == 200:
+            return True, None
+        return False, None
+
+    def is_rest(self, used_unsupported_rest_properties=None):
+        ''' only return error if there is a reason to '''
+        use_rest, error = self._is_rest(used_unsupported_rest_properties)
+        if used_unsupported_rest_properties is None:
+            return use_rest
+        return use_rest, error
+
+    def log_error(self, status_code, message):
+        # Errors are kept both in the flat error list and the debug log.
+        self.errors.append(message)
+        self.debug_logs.append((status_code, message))
+
+    def log_debug(self, status_code, content):
+        self.debug_logs.append((status_code, content))
+
+
+class AwsCvsRestAPI(object):
+    """Wrapper for the AWS Cloud Volumes Service REST API.
+
+    Authenticates via 'api-key'/'secret-key' headers rather than basic auth.
+    """
+
+    def __init__(self, module, timeout=60):
+        self.module = module
+        self.api_key = self.module.params['api_key']
+        self.secret_key = self.module.params['secret_key']
+        self.api_url = self.module.params['api_url']
+        # NOTE(review): ``verify`` is stored here but never passed to
+        # requests.request below, so certificate validation follows the
+        # requests default regardless of 'validate_certs' — TODO confirm.
+        self.verify = self.module.params['validate_certs']
+        self.timeout = timeout
+        self.url = 'https://' + self.api_url + '/v1/'
+        self.check_required_library()
+
+    def check_required_library(self):
+        # Abort the module run when the 'requests' package is missing.
+        if not HAS_REQUESTS:
+            self.module.fail_json(msg=missing_required_lib('requests'))
+
+    def send_request(self, method, api, params, json=None):
+        ''' send http request and process response, including error conditions
+
+        Returns (json_dict, error_details); error_details is None on success.
+        NOTE(review): the ``params`` argument is accepted but never forwarded
+        to requests.request below — query parameters are silently dropped.
+        '''
+        url = self.url + api
+        status_code = None
+        content = None
+        json_dict = None
+        json_error = None
+        error_details = None
+        headers = {
+            'Content-type': "application/json",
+            'api-key': self.api_key,
+            'secret-key': self.secret_key,
+            'Cache-Control': "no-cache",
+        }
+
+        def get_json(response):
+            ''' extract json, and error message if present '''
+            try:
+                json = response.json()
+
+            except ValueError:
+                return None, None
+            # Anything outside 200/201/202 carries its error text in 'message'.
+            success_code = [200, 201, 202]
+            if response.status_code not in success_code:
+                error = json.get('message')
+            else:
+                error = None
+            return json, error
+        try:
+            response = requests.request(method, url, headers=headers, timeout=self.timeout, json=json)
+            status_code = response.status_code
+            # If the response was successful, no Exception will be raised
+            json_dict, json_error = get_json(response)
+        except requests.exceptions.HTTPError as err:
+            __, json_error = get_json(response)
+            if json_error is None:
+                error_details = str(err)
+        except requests.exceptions.ConnectionError as err:
+            error_details = str(err)
+        except Exception as err:
+            error_details = str(err)
+        if json_error is not None:
+            error_details = json_error
+
+        return json_dict, error_details
+
+    # If an error was reported in the json payload, it is handled below
+    def get(self, api, params=None):
+        method = 'GET'
+        return self.send_request(method, api, params)
+
+    def post(self, api, data, params=None):
+        method = 'POST'
+        return self.send_request(method, api, params, json=data)
+
+    def patch(self, api, data, params=None):
+        method = 'PATCH'
+        return self.send_request(method, api, params, json=data)
+
+    def put(self, api, data, params=None):
+        method = 'PUT'
+        return self.send_request(method, api, params, json=data)
+
+    def delete(self, api, data, params=None):
+        method = 'DELETE'
+        return self.send_request(method, api, params, json=data)
+
+    def get_state(self, jobId):
+        """ Method to get the state of the job
+
+        NOTE(review): several issues worth confirming here —
+        - ``method`` is assigned but never used;
+        - despite the tuple name, ``self.get`` returns (json_dict, error), not
+          (response, status_code);
+        - ``not in 'done'`` is a substring test, so states such as 'do' or 'ne'
+          would also terminate the loop;
+        - the loop polls with no delay and no timeout (busy-wait).
+        """
+        method = 'GET'
+        response, status_code = self.get('Jobs/%s' % jobId)
+        while str(response['state']) not in 'done':
+            response, status_code = self.get('Jobs/%s' % jobId)
+        return 'done'
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/module_utils/santricity.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/module_utils/santricity.py
new file mode 100644
index 00000000..0e890001
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/module_utils/santricity.py
@@ -0,0 +1,431 @@
+# (c) 2020, NetApp, Inc
+# BSD-3 Clause (see COPYING or https://opensource.org/licenses/BSD-3-Clause)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import random
+import mimetypes
+
+from pprint import pformat
+from ansible.module_utils import six
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils._text import to_native
+try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+except ImportError:
+ ansible_version = 'unknown'
+
+try:
+ from urlparse import urlparse, urlunparse
+except ImportError:
+ from urllib.parse import urlparse, urlunparse
+
+
+def eseries_host_argument_spec():
+    """Retrieve a base argument specification common to all NetApp E-Series modules"""
+    argument_spec = basic_auth_argument_spec()
+    argument_spec.update(dict(
+        api_username=dict(type="str", required=True),
+        api_password=dict(type="str", required=True, no_log=True),
+        api_url=dict(type="str", required=True),
+        # "1" is the first (and, for embedded web services, only) managed array.
+        ssid=dict(type="str", required=False, default="1"),
+        validate_certs=dict(type="bool", required=False, default=True)
+    ))
+    return argument_spec
+
+
+def eseries_proxy_argument_spec():
+    """Retrieve a base argument specification common to all NetApp E-Series modules for proxy specific tasks"""
+    argument_spec = basic_auth_argument_spec()
+    # Identical to eseries_host_argument_spec except 'ssid' is omitted;
+    # proxy-specific tasks pin ssid to "0" in NetAppESeriesModule.__init__.
+    argument_spec.update(dict(
+        api_username=dict(type="str", required=True),
+        api_password=dict(type="str", required=True, no_log=True),
+        api_url=dict(type="str", required=True),
+        validate_certs=dict(type="bool", required=False, default=True)
+    ))
+    return argument_spec
+
+
+class NetAppESeriesModule(object):
+    """Base class for all NetApp E-Series modules.
+
+    Provides a set of common methods for NetApp E-Series modules, including version checking, mode (proxy, embedded)
+    verification, http requests, secure http redirection for embedded web services, and logging setup.
+
+    Be sure to add the following lines in the module's documentation section:
+    extends_documentation_fragment:
+        - santricity
+
+    :param dict(dict) ansible_options: dictionary of ansible option definitions
+    :param str web_services_version: minimally required web services rest api version (default value: "02.00.0000.0000")
+    :param bool supports_check_mode: whether the module will support the check_mode capabilities (default=False)
+    :param list(list) mutually_exclusive: list containing list(s) of mutually exclusive options (optional)
+    :param list(list) required_if: list containing list(s) containing the option, the option value, and then a list of required options. (optional)
+    :param list(list) required_one_of: list containing list(s) of options for which at least one is required. (optional)
+    :param list(list) required_together: list containing list(s) of options that are required together. (optional)
+    :param bool log_requests: controls whether to log each request (default: True)
+    :param bool proxy_specific_task: controls whether ssid is a default option (default: False)
+    """
+    DEFAULT_TIMEOUT = 300
+    DEFAULT_SECURE_PORT = "8443"
+    DEFAULT_BASE_PATH = "devmgr/"
+    DEFAULT_REST_API_PATH = "devmgr/v2/"
+    DEFAULT_REST_API_ABOUT_PATH = "devmgr/utils/about"
+    DEFAULT_HEADERS = {"Content-Type": "application/json", "Accept": "application/json",
+                       "netapp-client-type": "Ansible-%s" % ansible_version}
+    HTTP_AGENT = "Ansible / %s" % ansible_version
+    # Multipliers for converting human-readable size units to bytes.
+    SIZE_UNIT_MAP = dict(bytes=1, b=1, kb=1024, mb=1024**2, gb=1024**3, tb=1024**4,
+                         pb=1024**5, eb=1024**6, zb=1024**7, yb=1024**8)
+
+    HOST_TYPE_INDEXES = {"aix mpio": 9, "avt 4m": 5, "hp-ux": 15, "linux atto": 24, "linux dm-mp": 28, "linux pathmanager": 25, "solaris 10 or earlier": 2,
+                         "solaris 11 or later": 17, "svc": 18, "ontap": 26, "mac": 22, "vmware": 10, "windows": 1, "windows atto": 23, "windows clustered": 8}
+
+    def __init__(self, ansible_options, web_services_version=None, supports_check_mode=False,
+                 mutually_exclusive=None, required_if=None, required_one_of=None, required_together=None,
+                 log_requests=True, proxy_specific_task=False):
+
+        # Proxy-specific tasks have no per-array 'ssid' option; the proxy
+        # itself is addressed as system "0" (see below).
+        if proxy_specific_task:
+            argument_spec = eseries_proxy_argument_spec()
+        else:
+            argument_spec = eseries_host_argument_spec()
+
+        argument_spec.update(ansible_options)
+
+        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode,
+                                    mutually_exclusive=mutually_exclusive, required_if=required_if,
+                                    required_one_of=required_one_of, required_together=required_together)
+
+        args = self.module.params
+        self.web_services_version = web_services_version if web_services_version else "02.00.0000.0000"
+
+        if proxy_specific_task:
+            self.ssid = "0"
+        else:
+            self.ssid = args["ssid"]
+        self.url = args["api_url"]
+        self.log_requests = log_requests
+        # Credentials are stored as keyword arguments ready to splat into request().
+        self.creds = dict(url_username=args["api_username"],
+                          url_password=args["api_password"],
+                          validate_certs=args["validate_certs"])
+
+        if not self.url.endswith("/"):
+            self.url += "/"
+
+        # Lazily-populated caches; None means "not yet determined".
+        self.is_proxy_used_cache = None
+        self.is_embedded_available_cache = None
+        self.is_web_services_valid_cache = None
+
+    def _check_web_services_version(self):
+        """Verify proxy or embedded web services meets minimum version required for module.
+
+        The minimum required web services version is evaluated against version supplied through the web services rest
+        api. AnsibleFailJson exception will be raised when the minimum is not met or exceeded.
+
+        This helper function will update the supplied api url if secure http is not used for embedded web services
+
+        :raise AnsibleFailJson: raised when the contacted api service does not meet the minimum required version.
+        """
+        if not self.is_web_services_valid_cache:
+
+            url_parts = urlparse(self.url)
+            if not url_parts.scheme or not url_parts.netloc:
+                self.module.fail_json(msg="Failed to provide valid API URL. Example: https://192.168.1.100:8443/devmgr/v2. URL [%s]." % self.url)
+
+            if url_parts.scheme not in ["http", "https"]:
+                self.module.fail_json(msg="Protocol must be http or https. URL [%s]." % self.url)
+
+            # Normalize the url down to scheme://host[:port]/ and probe the about endpoint.
+            self.url = "%s://%s/" % (url_parts.scheme, url_parts.netloc)
+            about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+            rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, ignore_errors=True, force_basic_auth=False, **self.creds)
+
+            if rc != 200:
+                # First attempt failed: retry on the default secure port (8443).
+                self.module.warn("Failed to retrieve web services about information! Retrying with secure ports. Array Id [%s]." % self.ssid)
+                self.url = "https://%s:8443/" % url_parts.netloc.split(":")[0]
+                about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+                try:
+                    rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
+                                              % (self.ssid, to_native(error)))
+
+            # NOTE(review): version components are compared as strings, not
+            # integers, so e.g. "10" < "9" lexically — confirm the web services
+            # version fields never require numeric comparison.
+            if len(data["version"].split(".")) == 4:
+                major, minor, other, revision = data["version"].split(".")
+                minimum_major, minimum_minor, other, minimum_revision = self.web_services_version.split(".")
+
+                if not (major > minimum_major or
+                        (major == minimum_major and minor > minimum_minor) or
+                        (major == minimum_major and minor == minimum_minor and revision >= minimum_revision)):
+                    self.module.fail_json(msg="Web services version does not meet minimum version required. Current version: [%s]."
+                                              " Version required: [%s]." % (data["version"], self.web_services_version))
+                self.module.log("Web services rest api version met the minimum required version.")
+            else:
+                self.module.warn("Web services rest api version unknown!")
+
+            self.is_web_services_valid_cache = True
+
+    def is_web_services_version_met(self, version):
+        """Determines whether a particular web services version has been satisfied."""
+        split_version = version.split(".")
+        # NOTE(review): the third component (index 2) is never digit-checked —
+        # only indices 0, 1 and 3 are validated; confirm this is intentional.
+        if len(split_version) != 4 or not split_version[0].isdigit() or not split_version[1].isdigit() or not split_version[3].isdigit():
+            self.module.fail_json(msg="Version is not a valid Web Services version. Version [%s]." % version)
+
+        url_parts = urlparse(self.url)
+        if not url_parts.scheme or not url_parts.netloc:
+            self.module.fail_json(msg="Failed to provide valid API URL. Example: https://192.168.1.100:8443/devmgr/v2. URL [%s]." % self.url)
+
+        if url_parts.scheme not in ["http", "https"]:
+            self.module.fail_json(msg="Protocol must be http or https. URL [%s]." % self.url)
+
+        self.url = "%s://%s/" % (url_parts.scheme, url_parts.netloc)
+        about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+        rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, ignore_errors=True, **self.creds)
+
+        if rc != 200:
+            # Retry on the default secure port, mirroring _check_web_services_version.
+            self.module.warn("Failed to retrieve web services about information! Retrying with secure ports. Array Id [%s]." % self.ssid)
+            self.url = "https://%s:8443/" % url_parts.netloc.split(":")[0]
+            about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+            try:
+                rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, **self.creds)
+            except Exception as error:
+                self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+        # NOTE(review): same string-wise version comparison as in
+        # _check_web_services_version applies here.
+        if len(data["version"].split(".")) == 4:
+            major, minor, other, revision = data["version"].split(".")
+            minimum_major, minimum_minor, other, minimum_revision = split_version
+            if not (major > minimum_major or
+                    (major == minimum_major and minor > minimum_minor) or
+                    (major == minimum_major and minor == minimum_minor and revision >= minimum_revision)):
+                return False
+        else:
+            return False
+        return True
+
+    def is_embedded_available(self):
+        """Determine whether the storage array has embedded services available."""
+        self._check_web_services_version()
+
+        if self.is_embedded_available_cache is None:
+
+            if self.is_proxy():
+                # ssid "0"/"proxy" addresses the proxy itself, which has no embedded services.
+                if self.ssid == "0" or self.ssid.lower() == "proxy":
+                    self.is_embedded_available_cache = False
+                else:
+                    try:
+                        # A non-empty 'bundle' code module in the SA graph indicates embedded web services.
+                        rc, bundle = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/extendedSAData/codeVersions[codeModule='bundle']"
+                                                  % self.ssid)
+                        self.is_embedded_available_cache = False
+                        if bundle:
+                            self.is_embedded_available_cache = True
+                    except Exception as error:
+                        self.module.fail_json(msg="Failed to retrieve information about storage system [%s]. Error [%s]." % (self.ssid, to_native(error)))
+            else:   # Contacted using embedded web services
+                self.is_embedded_available_cache = True
+
+        self.module.log("embedded_available: [%s]" % ("True" if self.is_embedded_available_cache else "False"))
+        return self.is_embedded_available_cache
+
+    def is_embedded(self):
+        """Determine whether web services server is the embedded web services."""
+        return not self.is_proxy()
+
+    def is_proxy(self):
+        """Determine whether web services server is the proxy web services.
+
+        :raise AnsibleFailJson: raised when web services about endpoint failed to be contacted.
+        :return bool: whether contacted web services is running from storage array (embedded) or from a proxy.
+        """
+        self._check_web_services_version()
+
+        if self.is_proxy_used_cache is None:
+            about_url = self.url + self.DEFAULT_REST_API_ABOUT_PATH
+            try:
+                rc, data = request(about_url, timeout=self.DEFAULT_TIMEOUT, headers=self.DEFAULT_HEADERS, force_basic_auth=False, **self.creds)
+                self.is_proxy_used_cache = data["runningAsProxy"]
+
+                self.module.log("proxy: [%s]" % ("True" if self.is_proxy_used_cache else "False"))
+            except Exception as error:
+                self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+        return self.is_proxy_used_cache
+
+    def request(self, path, rest_api_path=DEFAULT_REST_API_PATH, rest_api_url=None, data=None, method='GET', headers=None, ignore_errors=False, timeout=None,
+                force_basic_auth=True, log_request=None):
+        """Issue an HTTP request to a url, retrieving an optional JSON response.
+
+        :param str path: web services rest api endpoint path (Example: storage-systems/1/graph). Note that when the
+        full url path is specified then that will be used without supplying the protocol, hostname, port and rest path.
+        :param str rest_api_path: override the class DEFAULT_REST_API_PATH which is used to build the request URL.
+        :param str rest_api_url: override the class url member which contains the base url for web services.
+        :param data: data required for the request (data may be json or any python structured data)
+        :param str method: request method such as GET, POST, DELETE.
+        :param dict headers: dictionary containing request headers.
+        :param bool ignore_errors: forces the request to ignore any raised exceptions.
+        :param int timeout: duration of seconds before request finally times out.
+        :param bool force_basic_auth: Ensure that basic authentication is being used.
+        :param bool log_request: Log the request and response
+        """
+        self._check_web_services_version()
+
+        if rest_api_url is None:
+            rest_api_url = self.url
+        if headers is None:
+            headers = self.DEFAULT_HEADERS
+        if timeout is None:
+            timeout = self.DEFAULT_TIMEOUT
+        if log_request is None:
+            log_request = self.log_requests
+
+        # Serialize structured payloads when the request is declared as JSON.
+        if not isinstance(data, str) and "Content-Type" in headers and headers["Content-Type"] == "application/json":
+            data = json.dumps(data)
+
+        if path.startswith("/"):
+            path = path[1:]
+        request_url = rest_api_url + rest_api_path + path
+
+        if log_request:
+            self.module.log(pformat(dict(url=request_url, data=data, method=method, headers=headers)))
+
+        response = self._request(url=request_url, data=data, method=method, headers=headers, last_mod_time=None, timeout=timeout,
+                                 http_agent=self.HTTP_AGENT, force_basic_auth=force_basic_auth, ignore_errors=ignore_errors, **self.creds)
+        if log_request:
+            self.module.log(pformat(response))
+
+        return response
+
+    @staticmethod
+    def _request(url, data=None, headers=None, method='GET', use_proxy=True, force=False, last_mod_time=None, timeout=10, validate_certs=True,
+                 url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+        """Issue an HTTP request to a url, retrieving an optional JSON response.
+
+        Returns (status_code, response) where response is the decoded JSON body
+        when possible, otherwise raw bytes. Raises Exception(rc, response) on
+        HTTP errors unless ignore_errors is set.
+        """
+
+        if headers is None:
+            headers = {"Content-Type": "application/json", "Accept": "application/json"}
+        headers.update({"netapp-client-type": "Ansible-%s" % ansible_version})
+
+        if not http_agent:
+            http_agent = "Ansible / %s" % ansible_version
+
+        try:
+            r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout,
+                         validate_certs=validate_certs, url_username=url_username, url_password=url_password, http_agent=http_agent,
+                         force_basic_auth=force_basic_auth)
+            rc = r.getcode()
+            response = r.read()
+            if response:
+                response = json.loads(response)
+
+        except HTTPError as error:
+            # Error responses still carry a useful body; decode it if we can.
+            rc = error.code
+            response = error.fp.read()
+            try:
+                response = json.loads(response)
+            except Exception:
+                pass
+
+            if not ignore_errors:
+                raise Exception(rc, response)
+
+        return rc, response
+
+
+def create_multipart_formdata(files, fields=None, send_8kb=False):
+    """Create the data for a multipart/form request.
+
+    :param list(list) files: list of lists each containing (name, filename, path).
+    :param list(list) fields: list of lists each containing (key, value).
+    :param bool send_8kb: only sends the first 8kb of the files (default: False).
+    :return: (headers, data) tuple where headers carries the boundary-bearing
+        Content-Type and Content-Length, and data is the encoded payload.
+    """
+    # Boundary mirrors browser-generated ones: 27 dashes plus 27 random digits.
+    boundary = "---------------------------" + "".join([str(random.randint(0, 9)) for x in range(27)])
+    data_parts = list()
+    data = None
+
+    if six.PY2:  # Generate payload for Python 2
+        # Python 2 str is byte-oriented, so the payload is assembled as str.
+        newline = "\r\n"
+        if fields is not None:
+            for key, value in fields:
+                data_parts.extend(["--%s" % boundary,
+                                   'Content-Disposition: form-data; name="%s"' % key,
+                                   "",
+                                   value])
+
+        for name, filename, path in files:
+            with open(path, "rb") as fh:
+                value = fh.read(8192) if send_8kb else fh.read()
+
+                data_parts.extend(["--%s" % boundary,
+                                   'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
+                                   "Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream"),
+                                   "",
+                                   value])
+        data_parts.extend(["--%s--" % boundary, ""])
+        data = newline.join(data_parts)
+
+    else:
+        # Python 3: every part must be bytes before joining.
+        newline = six.b("\r\n")
+        if fields is not None:
+            for key, value in fields:
+                data_parts.extend([six.b("--%s" % boundary),
+                                   six.b('Content-Disposition: form-data; name="%s"' % key),
+                                   six.b(""),
+                                   six.b(value)])
+
+        for name, filename, path in files:
+            with open(path, "rb") as fh:
+                value = fh.read(8192) if send_8kb else fh.read()
+
+                data_parts.extend([six.b("--%s" % boundary),
+                                   six.b('Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename)),
+                                   six.b("Content-Type: %s" % (mimetypes.guess_type(path)[0] or "application/octet-stream")),
+                                   six.b(""),
+                                   value])
+        data_parts.extend([six.b("--%s--" % boundary), b""])
+        data = newline.join(data_parts)
+
+    headers = {
+        "Content-Type": "multipart/form-data; boundary=%s" % boundary,
+        "Content-Length": str(len(data))}
+
+    return headers, data
+
+
+def request(url, data=None, headers=None, method='GET', use_proxy=True,
+            force=False, last_mod_time=None, timeout=10, validate_certs=True,
+            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
+    """Issue an HTTP request to a url, retrieving an optional JSON response.
+
+    Returns (status_code, data). Raises Exception(status_code, data) for
+    status codes >= 400 unless ignore_errors is True.
+    """
+
+    if headers is None:
+        headers = {"Content-Type": "application/json", "Accept": "application/json"}
+    headers.update({"netapp-client-type": "Ansible-%s" % ansible_version})
+
+    if not http_agent:
+        http_agent = "Ansible / %s" % ansible_version
+
+    try:
+        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
+                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
+                     url_username=url_username, url_password=url_password, http_agent=http_agent,
+                     force_basic_auth=force_basic_auth)
+    except HTTPError as err:
+        # Error responses still carry a body; read it from the exception's file object.
+        r = err.fp
+
+    try:
+        raw_data = r.read()
+        if raw_data:
+            data = json.loads(raw_data)
+        else:
+            # NOTE(review): when the response body is empty, ``data`` is never
+            # reset, so the request payload passed in by the caller is returned
+            # as the "response" data — confirm callers do not depend on this.
+            raw_data = None
+    except Exception:
+        if ignore_errors:
+            pass
+        else:
+            raise Exception(raw_data)
+
+    resp_code = r.getcode()
+
+    if resp_code >= 400 and not ignore_errors:
+        raise Exception(resp_code, data)
+    else:
+        return resp_code, data
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts.py
new file mode 100644
index 00000000..68abc2fb
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts.py
@@ -0,0 +1,253 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_alerts
+short_description: NetApp E-Series manage email notification settings
+description:
+ - Certain E-Series systems have the capability to send email notifications on potentially critical events.
+ - This module will allow the owner of the system to specify email recipients for these messages.
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - Enable/disable the sending of email-based alerts.
+ type: str
+ default: enabled
+ required: false
+ choices:
+ - enabled
+ - disabled
+ server:
+ description:
+ - A fully qualified domain name, IPv4 address, or IPv6 address of a mail server.
+ - To use a fully qualified domain name, you must configure a DNS server on both controllers using
+ M(netapp_eseries.santricity.na_santricity_mgmt_interface).
+ - Required when I(state=enabled).
+ type: str
+ required: false
+ sender:
+ description:
+ - This is the sender that the recipient will see. It doesn't necessarily need to be a valid email account.
+ - Required when I(state=enabled).
+ type: str
+ required: false
+ contact:
+ description:
+ - Allows the owner to specify some free-form contact information to be included in the emails.
+ - This is typically utilized to provide a contact phone number.
+ type: str
+ required: false
+ recipients:
+ description:
+ - The email addresses that will receive the email notifications.
+ - Required when I(state=enabled).
+ type: list
+ required: false
+ test:
+ description:
+ - When a change is detected in the configuration, a test email will be sent.
+ - This may take a few minutes to process.
+ - Only applicable if I(state=enabled).
+ type: bool
+ default: false
+notes:
+ - Check mode is supported.
+ - Alertable messages are a subset of messages shown by the Major Event Log (MEL), of the storage-system. Examples
+ of alertable messages include drive failures, failed controllers, loss of redundancy, and other warning/critical
+ events.
+ - This API is currently only supported with the Embedded Web Services API v2.0 and higher.
+"""
+
+EXAMPLES = """
+ - name: Enable email-based alerting
+ na_santricity_alerts:
+ state: enabled
+ sender: noreply@example.com
+ server: mail@example.com
+ contact: "Phone: 1-555-555-5555"
+ recipients:
+ - name1@example.com
+ - name2@example.com
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+
+ - name: Disable alerting
+ na_santricity_alerts:
+ state: disabled
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+"""
+import re
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesAlerts(NetAppESeriesModule):
+    """Manage E-Series email alert settings (enable/disable, server, sender, recipients)."""
+
+    def __init__(self):
+        ansible_options = dict(state=dict(type='str', required=False, default='enabled', choices=['enabled', 'disabled']),
+                               server=dict(type='str', required=False),
+                               sender=dict(type='str', required=False),
+                               contact=dict(type='str', required=False),
+                               recipients=dict(type='list', required=False),
+                               test=dict(type='bool', required=False, default=False))
+
+        # server/sender/recipients are only mandatory when alerting is enabled.
+        required_if = [['state', 'enabled', ['server', 'sender', 'recipients']]]
+        super(NetAppESeriesAlerts, self).__init__(ansible_options=ansible_options,
+                                                  web_services_version="02.00.0000.0000",
+                                                  required_if=required_if,
+                                                  supports_check_mode=True)
+
+        args = self.module.params
+        self.alerts = args['state'] == 'enabled'
+        self.server = args['server']
+        self.sender = args['sender']
+        self.contact = args['contact']
+        self.recipients = args['recipients']
+        self.test = args['test']
+        self.check_mode = self.module.check_mode
+
+        # Very basic validation on email addresses: xx@yy.zz
+        email = re.compile(r"[^@]+@[^@]+\.[^@]+")
+
+        if self.sender and not email.match(self.sender):
+            self.module.fail_json(msg="The sender (%s) provided is not a valid email address." % self.sender)
+
+        if self.recipients is not None:
+            for recipient in self.recipients:
+                if not email.match(recipient):
+                    self.module.fail_json(msg="The recipient (%s) provided is not a valid email address." % recipient)
+
+            if len(self.recipients) < 1:
+                self.module.fail_json(msg="At least one recipient address must be specified.")
+
+    def get_configuration(self):
+        """Retrieve the current storage system alert settings."""
+        if self.is_proxy():
+            # Via the proxy, alert settings must be reached through the
+            # forward endpoint of the array's embedded web services.
+            if self.is_embedded_available():
+                try:
+                    rc, result = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/device-alerts" % self.ssid)
+                    return result
+                except Exception as err:
+                    self.module.fail_json(msg="Failed to retrieve the alerts configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+            else:
+                self.module.fail_json(msg="Setting SANtricity alerts is only available from SANtricity Web Services Proxy if the storage system has"
+                                          " SANtricity Web Services Embedded available. Array [%s]." % self.ssid)
+        else:
+            try:
+                rc, result = self.request("storage-systems/%s/device-alerts" % self.ssid)
+                return result
+            except Exception as err:
+                self.module.fail_json(msg="Failed to retrieve the alerts configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+    def update_configuration(self):
+        """Update the storage system alert settings.
+
+        Compares the desired settings with the current configuration and only
+        issues a POST when a difference is detected (and not in check mode).
+        Returns True when a change was (or would be) made.
+        """
+        config = self.get_configuration()
+        update = False
+        body = dict()
+
+        if self.alerts:
+            body = dict(alertingEnabled=True)
+            if not config['alertingEnabled']:
+                update = True
+
+            body.update(emailServerAddress=self.server)
+            if config['emailServerAddress'] != self.server:
+                update = True
+
+            body.update(additionalContactInformation=self.contact, sendAdditionalContactInformation=True)
+            if self.contact and (self.contact != config['additionalContactInformation']
+                                 or not config['sendAdditionalContactInformation']):
+                update = True
+
+            body.update(emailSenderAddress=self.sender)
+            if config['emailSenderAddress'] != self.sender:
+                update = True
+
+            # Sort both sides in place so the comparison is order-insensitive.
+            # NOTE(review): this mutates self.recipients.
+            self.recipients.sort()
+            if config['recipientEmailAddresses']:
+                config['recipientEmailAddresses'].sort()
+
+            body.update(recipientEmailAddresses=self.recipients)
+            if config['recipientEmailAddresses'] != self.recipients:
+                update = True
+
+        elif config['alertingEnabled']:
+            # Disabling: clear out every alerting-related field.
+            body = {"alertingEnabled": False, "emailServerAddress": "", "emailSenderAddress": "", "sendAdditionalContactInformation": False,
+                    "additionalContactInformation": "", "recipientEmailAddresses": []}
+            update = True
+
+        if update and not self.check_mode:
+            # NOTE(review): the failure messages below say "storage-system
+            # name" although this posts alert settings — looks copy-pasted
+            # from another module.
+            if self.is_proxy() and self.is_embedded_available():
+                try:
+                    rc, result = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/device-alerts" % self.ssid, method="POST", data=body)
+                except Exception as err:
+                    self.module.fail_json(msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+            else:
+                try:
+                    rc, result = self.request("storage-systems/%s/device-alerts" % self.ssid, method="POST", data=body)
+                except Exception as err:
+                    self.module.fail_json(msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+        return update
+
+    def send_test_email(self):
+        """Send a test email to verify that the provided configuration is valid and functional."""
+        if not self.check_mode:
+            if self.is_proxy() and self.is_embedded_available():
+                try:
+                    rc, resp = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/device-alerts/alert-email-test" % self.ssid, method="POST")
+                    if resp['response'] != 'emailSentOK':
+                        self.module.fail_json(msg="The test email failed with status=[%s]! Array Id [%s]." % (resp['response'], self.ssid))
+                except Exception as err:
+                    self.module.fail_json(msg="We failed to send the test email! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+            else:
+                try:
+                    rc, resp = self.request("storage-systems/%s/device-alerts/alert-email-test" % self.ssid, method="POST")
+                    if resp['response'] != 'emailSentOK':
+                        self.module.fail_json(msg="The test email failed with status=[%s]! Array Id [%s]." % (resp['response'], self.ssid))
+                except Exception as err:
+                    self.module.fail_json(msg="We failed to send the test email! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+    def update(self):
+        """Apply the configuration, optionally send a test email, and exit the module."""
+        update = self.update_configuration()
+
+        # A test email is only sent when requested AND a change was made.
+        if self.test and update:
+            self.send_test_email()
+
+        if self.alerts:
+            msg = 'Alerting has been enabled using server=%s, sender=%s.' % (self.server, self.sender)
+        else:
+            msg = 'Alerting has been disabled.'
+
+        self.module.exit_json(msg=msg, changed=update)
+
+
+def main():
+    """Module entry point: construct the alerts manager and apply the configuration."""
+    alerts = NetAppESeriesAlerts()
+    alerts.update()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts_syslog.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts_syslog.py
new file mode 100644
index 00000000..e7c92557
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts_syslog.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_alerts_syslog
+short_description: NetApp E-Series manage syslog servers receiving storage system alerts.
+description:
+  - Manage the list of syslog servers that will receive notifications on potentially critical events.
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ servers:
+ description:
+ - List of dictionaries where each dictionary contains a syslog server entry.
+ type: list
+ required: False
+ suboptions:
+ address:
+ description:
+ - Syslog server address can be a fully qualified domain name, IPv4 address, or IPv6 address.
+ required: true
+ port:
+ description:
+ - UDP Port must be a numerical value between 0 and 65535. Typically, the UDP Port for syslog is 514.
+ required: false
+ default: 514
+ test:
+ description:
+ - This forces a test syslog message to be sent to the stated syslog server.
+ - Test will only be issued when a change is made.
+ type: bool
+ default: false
+notes:
+ - Check mode is supported.
+ - This API is currently only supported with the Embedded Web Services API v2.12 (bundled with
+ SANtricity OS 11.40.2) and higher.
+"""
+
+EXAMPLES = """
+ - name: Add two syslog server configurations to NetApp E-Series storage array.
+ na_santricity_alerts_syslog:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ servers:
+ - address: "192.168.1.100"
+ - address: "192.168.2.100"
+ port: 514
+ - address: "192.168.3.100"
+ port: 1000
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+"""
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
class NetAppESeriesAlertsSyslog(NetAppESeriesModule):
    """Manage the list of syslog servers that receive storage system alert notifications."""

    def __init__(self):
        # Fixed: the original spec used require=False (typo for required) and passed
        # required_if/mutually_exclusive rules over options ("state", "address", "absent")
        # that do not exist in this module's argument spec; those rules could never take
        # effect and have been removed.
        ansible_options = dict(servers=dict(type="list", required=False),
                               test=dict(type="bool", default=False, required=False))

        super(NetAppESeriesAlertsSyslog, self).__init__(ansible_options=ansible_options,
                                                        web_services_version="02.00.0000.0000",
                                                        supports_check_mode=True)
        args = self.module.params

        # The storage system accepts at most five syslog receivers.
        if args["servers"] and len(args["servers"]) > 5:
            self.module.fail_json(msg="Maximum number of syslog servers is 5! Array Id [%s]." % self.ssid)

        # Map syslog server address -> UDP port (514 is the conventional syslog port).
        self.servers = {}
        if args["servers"] is not None:
            for server in args["servers"]:
                self.servers[server["address"]] = server.get("port", 514)

        self.test = args["test"]
        self.check_mode = self.module.check_mode

        # Check whether request needs to be forwarded on to the controller web services rest api.
        # NOTE(review): ssid is compared to int 0 here while na_santricity_auditlog compares it
        # to the string "0" -- confirm the actual type supplied by NetAppESeriesModule.
        self.url_path_prefix = ""
        if not self.is_embedded() and self.ssid != 0:
            self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid

    def get_current_configuration(self):
        """Retrieve existing alert-syslog configuration.

        :returns: dictionary containing the current syslog receiver configuration
        """
        try:
            rc, result = self.request(self.url_path_prefix + "storage-systems/%s/device-alerts/alert-syslog" % ("1" if self.url_path_prefix else self.ssid))
            return result
        except Exception as error:
            self.module.fail_json(msg="Failed to retrieve syslog configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))

    def is_change_required(self):
        """Determine whether the current configuration differs from the requested one."""
        current_config = self.get_current_configuration()

        # When syslog servers should exist, search for them.
        if self.servers:
            # Any existing receiver that is unknown or has the wrong port requires an update.
            for entry in current_config["syslogReceivers"]:
                if entry["serverName"] not in self.servers.keys() or entry["portNumber"] != self.servers[entry["serverName"]]:
                    return True

            # Any requested server missing from the current configuration requires an update.
            for server, port in self.servers.items():
                for entry in current_config["syslogReceivers"]:
                    if server == entry["serverName"] and port == entry["portNumber"]:
                        break
                else:
                    return True
            return False

        # No servers requested: a change is only required when receivers currently exist.
        elif current_config["syslogReceivers"]:
            return True

        return False

    def make_request_body(self):
        """Generate the alert-syslog request body from the requested servers."""
        body = {"syslogReceivers": [], "defaultFacility": 3, "defaultTag": "StorageArray"}

        for server, port in self.servers.items():
            body["syslogReceivers"].append({"serverName": server, "portNumber": port})

        return body

    def test_configuration(self):
        """Send a syslog test message to all configured servers (the API offers no per-server test)."""
        try:
            rc, result = self.request(self.url_path_prefix + "storage-systems/%s/device-alerts/alert-syslog-test"
                                      % ("1" if self.url_path_prefix else self.ssid), method="POST")
        except Exception as error:
            self.module.fail_json(msg="Failed to send test message! Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))

    def update(self):
        """Update configuration and respond to ansible."""
        change_required = self.is_change_required()

        if change_required and not self.check_mode:
            try:
                rc, result = self.request(self.url_path_prefix + "storage-systems/%s/device-alerts/alert-syslog" % ("1" if self.url_path_prefix else self.ssid),
                                          method="POST", data=self.make_request_body())
            except Exception as error:
                self.module.fail_json(msg="Failed to add syslog server! Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))

            # Only issue the test message when a change was actually made.
            if self.test and self.servers:
                self.test_configuration()

        self.module.exit_json(msg="The syslog settings have been updated.", changed=change_required)
+
+
def main():
    """Module entry point: build the syslog settings manager and apply the configuration."""
    NetAppESeriesAlertsSyslog().update()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_asup.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_asup.py
new file mode 100644
index 00000000..c6922d80
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_asup.py
@@ -0,0 +1,521 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_asup
+short_description: NetApp E-Series manage auto-support settings
+description:
+ - Allow the auto-support settings to be configured for an individual E-Series storage-system
+author:
+ - Michael Price (@lmprice)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - Enable/disable the E-Series auto-support configuration or maintenance mode.
+ - When this option is enabled, configuration, logs, and other support-related information will be relayed
+ to NetApp to help better support your system. No personally identifiable information, passwords, etc, will
+ be collected.
+ - The maintenance state enables the maintenance window which allows maintenance activities to be performed on the storage array without
+ generating support cases.
+ - Maintenance mode cannot be enabled unless ASUP has previously been enabled.
+ type: str
+ default: enabled
+ choices:
+ - enabled
+ - disabled
+ - maintenance_enabled
+ - maintenance_disabled
+ active:
+ description:
+ - Enable active/proactive monitoring for ASUP. When a problem is detected by our monitoring systems, it's
+ possible that the bundle did not contain all of the required information at the time of the event.
+ Enabling this option allows NetApp support personnel to manually request transmission or re-transmission
+      of support data in order to resolve the problem.
+ - Only applicable if I(state=enabled).
+ default: true
+ type: bool
+ start:
+ description:
+ - A start hour may be specified in a range from 0 to 23 hours.
+ - ASUP bundles will be sent daily between the provided start and end time (UTC).
+ - I(start) must be less than I(end).
+ type: int
+ default: 0
+ end:
+ description:
+ - An end hour may be specified in a range from 1 to 24 hours.
+ - ASUP bundles will be sent daily between the provided start and end time (UTC).
+ - I(start) must be less than I(end).
+ type: int
+ default: 24
+ days:
+ description:
+ - A list of days of the week that ASUP bundles will be sent. A larger, weekly bundle will be sent on one
+ of the provided days.
+ type: list
+ choices:
+ - monday
+ - tuesday
+ - wednesday
+ - thursday
+ - friday
+ - saturday
+ - sunday
+ required: false
+ aliases:
+ - schedule_days
+ - days_of_week
+ method:
+ description:
+ - AutoSupport dispatch delivery method.
+ choices:
+ - https
+ - http
+ - email
+ type: str
+ required: false
+ default: https
+ routing_type:
+ description:
+ - AutoSupport routing
+ - Required when M(method==https or method==http).
+ choices:
+ - direct
+ - proxy
+ - script
+ type: str
+ default: direct
+ required: false
+ proxy:
+ description:
+ - Information particular to the proxy delivery method.
+ - Required when M((method==https or method==http) and routing_type==proxy).
+ type: dict
+ required: false
+ suboptions:
+ host:
+ description:
+ - Proxy host IP address or fully qualified domain name.
+ - Required when M(method==http or method==https) and M(routing_type==proxy).
+ type: str
+ required: false
+ port:
+ description:
+ - Proxy host port.
+ - Required when M(method==http or method==https) and M(routing_type==proxy).
+ type: str
+ required: false
+ script:
+ description:
+ - Path to the AutoSupport routing script file.
+ - Required when M(method==http or method==https) and M(routing_type==script).
+ type: str
+ required: false
+ email:
+ description:
+ - Information particular to the e-mail delivery method.
+ - Uses the SMTP protocol.
+ - Required when M(method==email).
+ type: dict
+ required: false
+ suboptions:
+ server:
+ description:
+ - Mail server's IP address or fully qualified domain name.
+ - Required when M(routing_type==email).
+ type: str
+ required: false
+ sender:
+ description:
+ - Sender's email account
+ - Required when M(routing_type==email).
+ type: str
+ required: false
+ test_recipient:
+ description:
+ - Test verification email
+ - Required when M(routing_type==email).
+ type: str
+ required: false
+ maintenance_duration:
+ description:
+ - The duration of time the ASUP maintenance mode will be active.
+      - Permissible range is between 1 and 72 hours.
+ - Required when I(state==maintenance_enabled).
+ type: int
+ default: 24
+ required: false
+ maintenance_emails:
+ description:
+ - List of email addresses for maintenance notifications.
+ - Required when I(state==maintenance_enabled).
+ type: list
+ required: false
+ validate:
+ description:
+ - Validate ASUP configuration.
+ type: bool
+ default: false
+ required: false
+notes:
+ - Check mode is supported.
+ - Enabling ASUP will allow our support teams to monitor the logs of the storage-system in order to proactively
+ respond to issues with the system. It is recommended that all ASUP-related options be enabled, but they may be
+ disabled if desired.
+ - This API is currently only supported with the Embedded Web Services API v2.0 and higher.
+"""
+
+EXAMPLES = """
+ - name: Enable ASUP and allow pro-active retrieval of bundles
+ na_santricity_asup:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: enabled
+ active: true
+ days: ["saturday", "sunday"]
+ start: 17
+ end: 20
+ - name: Set the ASUP schedule to only send bundles from 12 AM CST to 3 AM CST.
+ na_santricity_asup:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: disabled
+ - name: Set the ASUP schedule to only send bundles from 12 AM CST to 3 AM CST.
+ na_santricity_asup:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ state: maintenance_enabled
+ maintenance_duration: 24
+ maintenance_emails:
+ - admin@example.com
+ - name: Set the ASUP schedule to only send bundles from 12 AM CST to 3 AM CST.
+ na_santricity_asup:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: maintenance_disabled
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+asup:
+ description:
+ - True if ASUP is enabled.
+ returned: on success
+ sample: true
+ type: bool
+active:
+ description:
+ - True if the active option has been enabled.
+ returned: on success
+ sample: true
+ type: bool
+cfg:
+ description:
+ - Provide the full ASUP configuration.
+ returned: on success
+ type: complex
+ contains:
+ asupEnabled:
+ description:
+ - True if ASUP has been enabled.
+ type: bool
+ onDemandEnabled:
+ description:
+ - True if ASUP active monitoring has been enabled.
+ type: bool
+ daysOfWeek:
+ description:
+ - The days of the week that ASUP bundles will be sent.
+ type: list
+"""
+import time
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
class NetAppESeriesAsup(NetAppESeriesModule):
    """Manage the E-Series AutoSupport (ASUP) configuration and maintenance window."""

    # Valid schedule days; also serves as the default (every day) when none are supplied.
    DAYS_OPTIONS = ["sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday"]

    def __init__(self):

        ansible_options = dict(
            state=dict(type="str", required=False, default="enabled", choices=["enabled", "disabled", "maintenance_enabled", "maintenance_disabled"]),
            active=dict(type="bool", required=False, default=True),
            days=dict(type="list", required=False, aliases=["schedule_days", "days_of_week"], choices=self.DAYS_OPTIONS),
            start=dict(type="int", required=False, default=0),
            end=dict(type="int", required=False, default=24),
            method=dict(type="str", required=False, choices=["https", "http", "email"], default="https"),
            routing_type=dict(type="str", required=False, choices=["direct", "proxy", "script"], default="direct"),
            proxy=dict(type="dict", required=False, options=dict(host=dict(type="str", required=False),
                                                                 port=dict(type="str", required=False),
                                                                 script=dict(type="str", required=False))),
            email=dict(type="dict", required=False, options=dict(server=dict(type="str", required=False),
                                                                 sender=dict(type="str", required=False),
                                                                 test_recipient=dict(type="str", required=False))),
            maintenance_duration=dict(type="int", required=False, default=24),
            maintenance_emails=dict(type="list", required=False),
            # Fixed: was require=False (typo for required).
            validate=dict(type="bool", required=False, default=False))

        required_if = [["method", "https", ["routing_type"]],
                       ["method", "http", ["routing_type"]],
                       ["method", "email", ["email"]],
                       ["state", "maintenance_enabled", ["maintenance_duration", "maintenance_emails"]]]

        # NOTE(review): the original also passed mutually_exclusive=[["host", "script"], ["port", "script"]],
        # but those names are proxy suboptions rather than top-level options, so the rule never
        # took effect and has been dropped.
        super(NetAppESeriesAsup, self).__init__(ansible_options=ansible_options,
                                                web_services_version="02.00.0000.0000",
                                                required_if=required_if,
                                                supports_check_mode=True)

        args = self.module.params
        self.state = args["state"]
        self.active = args["active"]
        self.days = args["days"]
        self.start = args["start"]
        self.end = args["end"]

        self.method = args["method"]
        self.routing_type = args["routing_type"] if args["routing_type"] else "none"
        self.proxy = args["proxy"]
        self.email = args["email"]
        self.maintenance_duration = args["maintenance_duration"]
        self.maintenance_emails = args["maintenance_emails"]
        self.validate = args["validate"]

        # Email delivery validation needs an address to send the verification message to.
        if self.validate and self.email and "test_recipient" not in self.email.keys():
            self.module.fail_json(msg="test_recipient must be provided for validating email delivery method. Array [%s]" % self.ssid)

        self.check_mode = self.module.check_mode

        # Schedule window sanity checks; hours are converted to minutes for the web services API.
        if self.start >= self.end:
            self.module.fail_json(msg="The value provided for the start time is invalid."
                                      " It must be less than the end time.")
        if self.start < 0 or self.start > 23:
            self.module.fail_json(msg="The value provided for the start time is invalid. It must be between 0 and 23.")
        else:
            self.start = self.start * 60
        if self.end < 1 or self.end > 24:
            self.module.fail_json(msg="The value provided for the end time is invalid. It must be between 1 and 24.")
        else:
            # 1439 = last minute of the day; 24:00 is clamped to 23:59.
            self.end = min(self.end * 60, 1439)

        if self.maintenance_duration < 1 or self.maintenance_duration > 72:
            self.module.fail_json(msg="The maintenance duration must be equal to or between 1 and 72 hours.")

        # Default to every day of the week when no schedule days were supplied.
        if not self.days:
            self.days = self.DAYS_OPTIONS

        # Check whether request needs to be forwarded on to the controller web services rest api.
        self.url_path_prefix = ""
        if not self.is_embedded() and self.ssid != 0:
            self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid

    def get_configuration(self):
        """Retrieve the current ASUP configuration, failing when the device is not ASUP capable."""
        try:
            rc, result = self.request(self.url_path_prefix + "device-asup")

            if not (result["asupCapable"] and result["onDemandCapable"]):
                self.module.fail_json(msg="ASUP is not supported on this device. Array Id [%s]." % self.ssid)
            return result

        except Exception as err:
            self.module.fail_json(msg="Failed to retrieve ASUP configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

    def in_maintenance_mode(self):
        """Determine whether storage device is currently in maintenance mode.

        The maintenance window is tracked via ansible_asup_maintenance_* entries that this
        module writes to the web services key-value store.
        """
        results = False
        try:
            rc, key_values = self.request(self.url_path_prefix + "key-values")

            for key_value in key_values:
                if key_value["key"] == "ansible_asup_maintenance_email_list":
                    if not self.maintenance_emails:
                        self.maintenance_emails = key_value["value"].split(",")
                elif key_value["key"] == "ansible_asup_maintenance_stop_time":
                    # Maintenance mode remains active until the stored stop timestamp passes.
                    if time.time() < float(key_value["value"]):
                        results = True

        except Exception as error:
            self.module.fail_json(msg="Failed to retrieve maintenance windows information! Array [%s]. Error [%s]." % (self.ssid, to_native(error)))

        return results

    def update_configuration(self):
        """Determine whether a change is required and, when needed, apply the ASUP configuration.

        :returns: True when a change was (or, in check mode, would be) made
        """
        config = self.get_configuration()
        update = False
        body = dict()

        if self.state == "enabled":
            body = dict(asupEnabled=True)
            if not config["asupEnabled"]:
                update = True

            if (config["onDemandEnabled"] and config["remoteDiagsEnabled"]) != self.active:
                update = True
            body.update(dict(onDemandEnabled=self.active,
                             remoteDiagsEnabled=self.active))

            # Sort both sides so the day lists compare irrespective of ordering.
            self.days.sort()
            config["schedule"]["daysOfWeek"].sort()

            body["schedule"] = dict(daysOfWeek=self.days,
                                    dailyMinTime=self.start,
                                    dailyMaxTime=self.end,
                                    weeklyMinTime=self.start,
                                    weeklyMaxTime=self.end)

            if self.days != config["schedule"]["daysOfWeek"]:
                update = True
            if self.start != config["schedule"]["dailyMinTime"] or self.start != config["schedule"]["weeklyMinTime"]:
                update = True
            elif self.end != config["schedule"]["dailyMaxTime"] or self.end != config["schedule"]["weeklyMaxTime"]:
                update = True

            if self.method in ["https", "http"]:
                if self.routing_type == "direct":
                    body["delivery"] = dict(method=self.method,
                                            routingType="direct")
                elif self.routing_type == "proxy":
                    body["delivery"] = dict(method=self.method,
                                            proxyHost=self.proxy["host"],
                                            proxyPort=self.proxy["port"],
                                            routingType="proxyServer")
                elif self.routing_type == "script":
                    body["delivery"] = dict(method=self.method,
                                            proxyScript=self.proxy["script"],
                                            routingType="proxyScript")

            else:
                body["delivery"] = dict(method="smtp",
                                        mailRelayServer=self.email["server"],
                                        mailSenderAddress=self.email["sender"],
                                        routingType="none")

            if config["delivery"]["method"] != body["delivery"]["method"]:
                update = True
            elif config["delivery"]["method"] in ["https", "http"]:
                if config["delivery"]["routingType"] != body["delivery"]["routingType"]:
                    update = True
                elif (config["delivery"]["routingType"] == "proxyServer" and
                      (config["delivery"]["proxyHost"] != body["delivery"]["proxyHost"] or
                       config["delivery"]["proxyPort"] != body["delivery"]["proxyPort"])):
                    # Fixed: originally compared routingType against "proxy" (this module itself
                    # writes "proxyServer", so the branch could never match) and required BOTH the
                    # host AND the port to differ before flagging an update.
                    update = True
                elif config["delivery"]["routingType"] == "proxyScript" and config["delivery"]["proxyScript"] != body["delivery"]["proxyScript"]:
                    # Fixed: originally compared against "script"; the written value is "proxyScript".
                    update = True
            elif (config["delivery"]["method"] == "smtp" and
                  (config["delivery"]["mailRelayServer"] != body["delivery"]["mailRelayServer"] or
                   config["delivery"]["mailSenderAddress"] != body["delivery"]["mailSenderAddress"])):
                # Fixed: originally required both the relay server AND the sender to differ.
                update = True

            if self.in_maintenance_mode():
                update = True

        elif self.state == "disabled":
            if config["asupEnabled"]:  # Disable ASUP only when it is currently enabled.
                body = dict(asupEnabled=False)
                update = True

        else:
            if not config["asupEnabled"]:
                self.module.fail_json(msg="AutoSupport must be enabled before enabling or disabling maintenance mode. Array [%s]." % self.ssid)

            if self.in_maintenance_mode() or self.state == "maintenance_enabled":
                update = True

        if update and not self.check_mode:
            if self.state == "maintenance_enabled":
                try:
                    rc, response = self.request(self.url_path_prefix + "device-asup/maintenance-window", method="POST",
                                                data=dict(maintenanceWindowEnabled=True,
                                                          duration=self.maintenance_duration,
                                                          emailAddresses=self.maintenance_emails))
                except Exception as error:
                    # Fixed: message typo "Failed to enabled".
                    self.module.fail_json(msg="Failed to enable ASUP maintenance window. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))

                # Record the maintenance window in the key-value store so later runs can detect it.
                try:
                    rc, response = self.request(self.url_path_prefix + "key-values/ansible_asup_maintenance_email_list", method="POST",
                                                data=",".join(self.maintenance_emails))
                    rc, response = self.request(self.url_path_prefix + "key-values/ansible_asup_maintenance_stop_time", method="POST",
                                                data=str(time.time() + 60 * 60 * self.maintenance_duration))
                except Exception as error:
                    self.module.fail_json(msg="Failed to store maintenance information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))

            elif self.state == "maintenance_disabled":
                try:
                    rc, response = self.request(self.url_path_prefix + "device-asup/maintenance-window", method="POST",
                                                data=dict(maintenanceWindowEnabled=False,
                                                          emailAddresses=self.maintenance_emails))
                except Exception as error:
                    self.module.fail_json(msg="Failed to disable ASUP maintenance window. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))

                # Remove the maintenance window bookkeeping from the key-value store.
                try:
                    rc, response = self.request(self.url_path_prefix + "key-values/ansible_asup_maintenance_email_list", method="DELETE")
                    rc, response = self.request(self.url_path_prefix + "key-values/ansible_asup_maintenance_stop_time", method="DELETE")
                except Exception as error:
                    # Fixed: the original reused the "store maintenance information" message here.
                    self.module.fail_json(msg="Failed to remove maintenance information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))

            else:
                # Optionally verify the delivery configuration before committing it.
                if body["asupEnabled"] and self.validate:
                    validate_body = dict(delivery=body["delivery"])
                    if self.email:
                        validate_body["mailReplyAddress"] = self.email["test_recipient"]

                    try:
                        rc, response = self.request(self.url_path_prefix + "device-asup/verify-config", timeout=600, method="POST", data=validate_body)
                    except Exception as err:
                        self.module.fail_json(msg="Failed to validate ASUP configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

                try:
                    rc, response = self.request(self.url_path_prefix + "device-asup", method="POST", data=body)
                # This is going to catch cases like a connection failure
                except Exception as err:
                    self.module.fail_json(msg="Failed to change ASUP configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

        return update

    def apply(self):
        """Apply the requested ASUP configuration and exit the module with the result."""
        update = self.update_configuration()
        cfg = self.get_configuration()

        if update:
            self.module.exit_json(msg="The ASUP settings have been updated.", changed=update, asup=cfg["asupEnabled"], active=cfg["onDemandEnabled"], cfg=cfg)
        else:
            self.module.exit_json(msg="No ASUP changes required.", changed=update, asup=cfg["asupEnabled"], active=cfg["onDemandEnabled"], cfg=cfg)
+
+
def main():
    """Module entry point: build the ASUP manager and apply the configuration."""
    NetAppESeriesAsup().apply()


if __name__ == "__main__":
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auditlog.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auditlog.py
new file mode 100644
index 00000000..03a533fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auditlog.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_auditlog
+short_description: NetApp E-Series manage audit-log configuration
+description:
+ - This module allows an e-series storage system owner to set audit-log configuration parameters.
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ max_records:
+ description:
+      - The maximum number of log messages audit-log will retain.
+ - Max records must be between and including 100 and 50000.
+ type: int
+ default: 50000
+ log_level:
+ description: Filters the log messages according to the specified log level selection.
+ choices:
+ - all
+ - writeOnly
+ type: str
+ default: writeOnly
+ full_policy:
+ description: Specifies what audit-log should do once the number of entries approach the record limit.
+ choices:
+ - overWrite
+ - preventSystemAccess
+ type: str
+ default: overWrite
+ threshold:
+ description:
+ - This is the memory full percent threshold that audit-log will start issuing warning messages.
+ - Percent range must be between and including 60 and 90.
+ type: int
+ default: 90
+ force:
+ description:
+      - Forces the audit-log configuration to delete log history when log message fullness causes an immediate
+        warning or full condition.
+ - Warning! This will cause any existing audit-log messages to be deleted.
+ - This is only applicable for I(full_policy=preventSystemAccess).
+ type: bool
+ default: no
+notes:
+ - Check mode is supported.
+ - Use I(ssid=="0") or I(ssid=="proxy") to configure SANtricity Web Services Proxy auditlog settings otherwise.
+"""
+
+EXAMPLES = """
+- name: Define audit-log to prevent system access if records exceed 50000 with warnings occurring at 60% capacity.
+ na_santricity_auditlog:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ max_records: 50000
+ log_level: all
+ full_policy: preventSystemAccess
+ threshold: 60
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+"""
+import json
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
class NetAppESeriesAuditLog(NetAppESeriesModule):
    """Audit-log module configuration class."""
    # Upper bound the web services API accepts for retained audit-log records.
    MAX_RECORDS = 50000

    def __init__(self):
        ansible_options = dict(max_records=dict(type="int", default=50000),
                               log_level=dict(type="str", default="writeOnly", choices=["all", "writeOnly"]),
                               full_policy=dict(type="str", default="overWrite", choices=["overWrite", "preventSystemAccess"]),
                               threshold=dict(type="int", default=90),
                               force=dict(type="bool", default=False))
        super(NetAppESeriesAuditLog, self).__init__(ansible_options=ansible_options,
                                                    web_services_version="02.00.0000.0000",
                                                    supports_check_mode=True)

        args = self.module.params
        self.log_level = args["log_level"]
        self.force = args["force"]
        self.full_policy = args["full_policy"]
        self.max_records = args["max_records"]
        self.threshold = args["threshold"]

        if self.max_records < 100 or self.max_records > self.MAX_RECORDS:
            self.module.fail_json(msg="Audit-log max_records count must be between 100 and 50000: [%s]" % self.max_records)

        if self.threshold < 60 or self.threshold > 90:
            self.module.fail_json(msg="Audit-log percent threshold must be between 60 and 90: [%s]" % self.threshold)

        # Append web services proxy forward end point.
        self.url_path_prefix = ""
        if not self.is_embedded() and self.ssid != "0" and self.ssid.lower() != "proxy":
            self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid

    def _targets_proxy(self):
        """Return True when the proxy's own audit-log endpoints should be used.

        That is the case when talking to the web services proxy and ssid designates the
        proxy itself ("0" or "proxy") rather than a managed storage system.
        """
        # Fixed: the original condition used `self.ssid.lower() != "proxy"`, which routed
        # proxy-managed arrays to the proxy's own audit-log endpoints (contradicting both the
        # url_path_prefix logic above and the module notes). The test was also duplicated in
        # three methods; it now lives here.
        return self.is_proxy() and (self.ssid == "0" or self.ssid.lower() == "proxy")

    def get_configuration(self):
        """Retrieve the existing audit-log configurations.

        :returns: dictionary containing current audit-log configuration
        """
        try:
            if self._targets_proxy():
                rc, data = self.request("audit-log/config")
            else:
                rc, data = self.request(self.url_path_prefix + "storage-systems/1/audit-log/config")
            return data
        except Exception as err:
            self.module.fail_json(msg="Failed to retrieve the audit-log configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

    def build_configuration(self):
        """Build audit-log expected configuration.

        :returns: Tuple containing update boolean value and dictionary of audit-log configuration
        """
        config = self.get_configuration()

        current = dict(auditLogMaxRecords=config["auditLogMaxRecords"],
                       auditLogLevel=config["auditLogLevel"],
                       auditLogFullPolicy=config["auditLogFullPolicy"],
                       auditLogWarningThresholdPct=config["auditLogWarningThresholdPct"])

        body = dict(auditLogMaxRecords=self.max_records,
                    auditLogLevel=self.log_level,
                    auditLogFullPolicy=self.full_policy,
                    auditLogWarningThresholdPct=self.threshold)

        update = current != body
        return update, body

    def delete_log_messages(self):
        """Delete all audit-log messages."""
        try:
            if self._targets_proxy():
                rc, result = self.request("audit-log?clearAll=True", method="DELETE")
            else:
                rc, result = self.request(self.url_path_prefix + "storage-systems/1/audit-log?clearAll=True", method="DELETE")
        except Exception as err:
            self.module.fail_json(msg="Failed to delete audit-log messages! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

    def update_configuration(self, update=None, body=None, attempt_recovery=True):
        """Update audit-log configuration.

        :param update: pre-computed change flag; computed from build_configuration when None
        :param body: pre-computed request body; computed from build_configuration when None
        :param attempt_recovery: when True and the array rejects the change because the log is
            full (HTTP 422), optionally (force) clear the log and retry once
        """
        if update is None or body is None:
            update, body = self.build_configuration()

        if update and not self.module.check_mode:
            try:
                if self._targets_proxy():
                    rc, result = self.request("audit-log/config", data=json.dumps(body), method='POST', ignore_errors=True)
                else:
                    rc, result = self.request(self.url_path_prefix + "storage-systems/1/audit-log/config",
                                              data=json.dumps(body), method='POST', ignore_errors=True)

                if rc == 422:
                    if self.force and attempt_recovery:
                        self.delete_log_messages()
                        update = self.update_configuration(update, body, False)
                    else:
                        # Fixed: the original passed (rc, result) to to_native, which accepts a
                        # single object; report both values through the format string instead.
                        self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Error [%s, %s]." % (self.ssid, rc, result))

            except Exception as error:
                self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
        return update

    def update(self):
        """Update the audit-log configuration."""
        update = self.update_configuration()
        if update:
            self.module.exit_json(msg="Audit-log update complete", changed=update)
        else:
            self.module.exit_json(msg="No audit-log changes required", changed=update)
+
+
def main():
    """Module entry point: build the audit-log manager and apply the configuration."""
    NetAppESeriesAuditLog().update()


if __name__ == "__main__":
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auth.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auth.py
new file mode 100644
index 00000000..bde84c9f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auth.py
@@ -0,0 +1,341 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_auth
+short_description: NetApp E-Series set or update the password for a storage array device or SANtricity Web Services Proxy.
+description:
+ - Sets or updates the password for a storage array device or SANtricity Web Services Proxy.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ current_admin_password:
+ description:
+ - The current admin password.
+ - When making changes to the embedded web services's login passwords, api_password will be used and current_admin_password will be ignored.
+ - When making changes to the proxy web services's login passwords, api_password will be used and current_admin_password will be ignored.
+ - Only required when the password has been set and will be ignored if not set.
+ type: str
+ required: false
+ password:
+ description:
+ - The password you would like to set.
+ - Cannot be more than 30 characters.
+ type: str
+ required: false
+ user:
+ description:
+ - The local user account password to update
+ - For systems prior to E2800, use admin to change the rw (system password).
+ - For systems prior to E2800, all choices except admin will be ignored.
+ type: str
+ choices: ["admin", "monitor", "support", "security", "storage"]
+ default: "admin"
+ required: false
+ minimum_password_length:
+ description:
+ - This option defines the minimum password length.
+ type: int
+ required: false
+notes:
+ - Set I(ssid=="0") or I(ssid=="proxy") when attempting to change the password for SANtricity Web Services Proxy.
+ - SANtricity Web Services Proxy storage password will be updated when changing the password on a managed storage system from the proxy; This is only true
+ when the storage system has been previously contacted.
+"""
+
+EXAMPLES = """
+- name: Set the initial password
+ na_santricity_auth:
+ ssid: 1
+ api_url: https://192.168.1.100:8443/devmgr/v2
+ api_username: admin
+ api_password: adminpass
+ validate_certs: true
+ current_admin_password: currentadminpass
+ password: newpassword123
+ user: admin
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: str
+ sample: "Password Updated Successfully"
+"""
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+from time import sleep
+
+
+class NetAppESeriesAuth(NetAppESeriesModule):
+ def __init__(self):
+ version = "02.00.0000.0000"
+ ansible_options = dict(current_admin_password=dict(type="str", required=False, no_log=True),
+ password=dict(type="str", required=False, no_log=True),
+ user=dict(type="str", choices=["admin", "monitor", "support", "security", "storage"], default="admin", required=False),
+ minimum_password_length=dict(type="int", required=False, no_log=True))
+
+ super(NetAppESeriesAuth, self).__init__(ansible_options=ansible_options, web_services_version=version, supports_check_mode=True)
+ args = self.module.params
+ self.current_admin_password = args["current_admin_password"]
+ self.password = args["password"]
+ self.user = args["user"]
+ self.minimum_password_length = args["minimum_password_length"]
+
+ self.DEFAULT_HEADERS.update({"x-netapp-password-validate-method": "none"})
+
+ self.is_admin_password_set = None
+ self.current_password_length_requirement = None
+
+ def minimum_password_length_change_required(self):
+ """Retrieve the current storage array's global configuration."""
+ change_required = False
+ try:
+ if self.is_proxy():
+ if self.ssid == "0" or self.ssid.lower() == "proxy":
+ rc, system_info = self.request("local-users/info", force_basic_auth=False)
+
+ elif self.is_embedded_available():
+ rc, system_info = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/local-users/info" % self.ssid,
+ force_basic_auth=False)
+ else:
+ return False # legacy systems without embedded web services.
+ else:
+ rc, system_info = self.request("storage-systems/%s/local-users/info" % self.ssid, force_basic_auth=False)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to determine minimum password length. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ self.is_admin_password_set = system_info["adminPasswordSet"]
+ if self.minimum_password_length is not None and self.minimum_password_length != system_info["minimumPasswordLength"]:
+ change_required = True
+
+ if (self.password is not None and ((change_required and self.minimum_password_length > len(self.password)) or
+ (not change_required and system_info["minimumPasswordLength"] > len(self.password)))):
+ self.module.fail_json(msg="Password does not meet the length requirement [%s]. Array Id [%s]." % (system_info["minimumPasswordLength"], self.ssid))
+
+ return change_required
+
+ def update_minimum_password_length(self):
+ """Update automatic load balancing state."""
+ try:
+ if self.is_proxy():
+ if self.ssid == "0" or self.ssid.lower() == "proxy":
+ try:
+ if not self.is_admin_password_set:
+ self.creds["url_password"] = "admin"
+ rc, minimum_password_length = self.request("local-users/password-length", method="POST",
+ data={"minimumPasswordLength": self.minimum_password_length})
+ except Exception as error:
+ if not self.is_admin_password_set:
+ self.creds["url_password"] = ""
+ rc, minimum_password_length = self.request("local-users/password-length", method="POST",
+ data={"minimumPasswordLength": self.minimum_password_length})
+ elif self.is_embedded_available():
+ if not self.is_admin_password_set:
+ self.creds["url_password"] = ""
+ rc, minimum_password_length = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/local-users/password-length" % self.ssid,
+ method="POST", data={"minimumPasswordLength": self.minimum_password_length})
+ else:
+ if not self.is_admin_password_set:
+ self.creds["url_password"] = ""
+ rc, minimum_password_length = self.request("storage-systems/%s/local-users/password-length" % self.ssid, method="POST",
+ data={"minimumPasswordLength": self.minimum_password_length})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set minimum password length. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def logout_system(self):
+ """Ensure system is logged out. This is required because login test will always succeed if previously logged in."""
+ try:
+ if self.is_proxy():
+ if self.ssid == "0" or self.ssid.lower() == "proxy":
+ rc, system_info = self.request("utils/login", rest_api_path=self.DEFAULT_BASE_PATH, method="DELETE", force_basic_auth=False)
+ elif self.is_embedded_available():
+ rc, system_info = self.request("storage-systems/%s/forward/devmgr/utils/login" % self.ssid, method="DELETE", force_basic_auth=False)
+ else:
+ # Nothing to do for legacy systems without embedded web services.
+ pass
+ else:
+ rc, system_info = self.request("utils/login", rest_api_path=self.DEFAULT_BASE_PATH, method="DELETE", force_basic_auth=False)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to log out of storage system [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def password_change_required(self):
+ """Verify whether the current password is expected array password. Works only against embedded systems."""
+ if self.password is None:
+ return False
+
+ change_required = False
+ system_info = None
+ try:
+ if self.is_proxy():
+ if self.ssid == "0" or self.ssid.lower() == "proxy":
+ rc, system_info = self.request("local-users/info", force_basic_auth=False)
+ elif self.is_embedded_available():
+ rc, system_info = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/local-users/info" % self.ssid,
+ force_basic_auth=False)
+ else:
+ rc, response = self.request("storage-systems/%s/passwords" % self.ssid, ignore_errors=True)
+ system_info = {"minimumPasswordLength": 0, "adminPasswordSet": response["adminPasswordSet"]}
+ else:
+ rc, system_info = self.request("storage-systems/%s/local-users/info" % self.ssid, force_basic_auth=False)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve information about storage system [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ self.is_admin_password_set = system_info["adminPasswordSet"]
+
+ if not self.is_admin_password_set:
+ if self.user == "admin" and self.password != "":
+ change_required = True
+
+ # Determine whether user's password needs to be changed
+ else:
+ utils_login_used = False
+ self.logout_system() # This ensures that login test functions correctly. The query onlycheck=true does not work.
+
+ if self.is_proxy():
+ if self.ssid == "0" or self.ssid.lower() == "proxy":
+ utils_login_used = True
+ rc, response = self.request("utils/login?uid=%s&pwd=%s&xsrf=false&onlycheck=false" % (self.user, self.password),
+ rest_api_path=self.DEFAULT_BASE_PATH, log_request=False, ignore_errors=True, force_basic_auth=False)
+ # elif self.is_embedded_available():
+ # utils_login_used = True
+ # rc, response = self.request("storage-systems/%s/forward/devmgr/utils/login?uid=%s&pwd=%s&xsrf=false&onlycheck=false"
+ # % (self.ssid, self.user, self.password), log_request=False, ignore_errors=True, force_basic_auth=False)
+ else:
+ if self.user == "admin":
+ rc, response = self.request("storage-systems/%s/stored-password/validate" % self.ssid, method="POST", log_request=False,
+ ignore_errors=True, data={"password": self.password})
+ if rc == 200:
+ change_required = not response["isValidPassword"]
+ elif rc == 404: # endpoint did not exist, old proxy version
+ if self.is_web_services_version_met("04.10.0000.0000"):
+ self.module.fail_json(msg="For platforms before E2800 use SANtricity Web Services Proxy 4.1 or later! Array Id [%s].")
+ self.module.fail_json(msg="Failed to validate stored password! Array Id [%s].")
+ else:
+ self.module.fail_json(msg="Failed to validate stored password! Array Id [%s]." % self.ssid)
+ else:
+ self.module.fail_json(msg="Role based login not available! Only storage system password can be set for storage systems prior to E2800."
+ " Array Id [%s]." % self.ssid)
+ else:
+ utils_login_used = True
+ rc, response = self.request("utils/login?uid=%s&pwd=%s&xsrf=false&onlycheck=false" % (self.user, self.password),
+ rest_api_path=self.DEFAULT_BASE_PATH, log_request=False, ignore_errors=True, force_basic_auth=False)
+
+ # Check return codes to determine whether a change is required
+ if utils_login_used:
+ if rc == 401:
+ change_required = True
+ elif rc == 422:
+ self.module.fail_json(msg="SAML enabled! SAML disables default role based login. Array [%s]" % self.ssid)
+
+ return change_required
+
+ def set_array_admin_password(self):
+ """Set the array's admin password."""
+ if self.is_proxy():
+
+ # Update proxy's local users
+ if self.ssid == "0" or self.ssid.lower() == "proxy":
+ self.creds["url_password"] = "admin"
+ try:
+ body = {"currentAdminPassword": "", "updates": {"userName": "admin", "newPassword": self.password}}
+ rc, proxy = self.request("local-users", method="POST", data=body)
+ except Exception as error:
+ self.creds["url_password"] = ""
+ try:
+ body = {"currentAdminPassword": "", "updates": {"userName": "admin", "newPassword": self.password}}
+ rc, proxy = self.request("local-users", method="POST", data=body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set proxy's admin password. Error [%s]." % to_native(error))
+
+ self.creds["url_password"] = self.password
+
+ # Update password using the password endpoints, this will also update the storaged password
+ else:
+ try:
+ body = {"currentAdminPassword": "", "newPassword": self.password, "adminPassword": True}
+ rc, storage_system = self.request("storage-systems/%s/passwords" % self.ssid, method="POST", data=body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set storage system's admin password. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ # Update embedded local users
+ else:
+ self.creds["url_password"] = ""
+ try:
+ body = {"currentAdminPassword": "", "updates": {"userName": "admin", "newPassword": self.password}}
+ rc, proxy = self.request("storage-systems/%s/local-users" % self.ssid, method="POST", data=body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set embedded storage system's admin password. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+ self.creds["url_password"] = self.password
+
+ def set_array_password(self):
+ """Set the array password."""
+ if not self.is_admin_password_set:
+ self.module.fail_json(msg="Admin password not set! Set admin password before changing non-admin user passwords. Array [%s]." % self.ssid)
+
+ if self.is_proxy():
+
+ # Update proxy's local users
+ if self.ssid == "0" or self.ssid.lower() == "proxy":
+ try:
+ body = {"currentAdminPassword": self.creds["url_password"], "updates": {"userName": self.user, "newPassword": self.password}}
+ rc, proxy = self.request("local-users", method="POST", data=body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set proxy password. Error [%s]." % to_native(error))
+ elif self.is_embedded_available():
+ try:
+ body = {"currentAdminPassword": self.current_admin_password, "updates": {"userName": self.user, "newPassword": self.password}}
+ rc, proxy = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/local-users" % self.ssid, method="POST", data=body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set embedded user password. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ # Update embedded local users
+ else:
+ try:
+ body = {"currentAdminPassword": self.creds["url_password"], "updates": {"userName": self.user, "newPassword": self.password}}
+ rc, proxy = self.request("storage-systems/%s/local-users" % self.ssid, method="POST", data=body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set embedded user password. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def apply(self):
+ """Apply any required changes."""
+ password_change_required = self.password_change_required()
+ minimum_password_length_change_required = self.minimum_password_length_change_required()
+ change_required = password_change_required or minimum_password_length_change_required
+
+ if change_required and not self.module.check_mode:
+ if minimum_password_length_change_required:
+ self.update_minimum_password_length()
+
+ if password_change_required:
+ if not self.is_admin_password_set:
+ self.set_array_admin_password()
+ else:
+ self.set_array_password()
+
+ if password_change_required and minimum_password_length_change_required:
+ self.module.exit_json(msg="'%s' password and required password length has been changed. Array [%s]."
+ % (self.user, self.ssid), changed=change_required)
+ elif password_change_required:
+ self.module.exit_json(msg="'%s' password has been changed. Array [%s]." % (self.user, self.ssid), changed=change_required)
+ elif minimum_password_length_change_required:
+ self.module.exit_json(msg="Required password length has been changed. Array [%s]." % self.ssid, changed=change_required)
+ self.module.exit_json(msg="No changes have been made. Array [%s]." % self.ssid, changed=change_required)
+
+
+def main():
+ auth = NetAppESeriesAuth()
+ auth.apply()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_client_certificate.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_client_certificate.py
new file mode 100644
index 00000000..8d5ee4bd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_client_certificate.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+module: na_santricity_client_certificate
+short_description: NetApp E-Series manage remote server certificates.
+description: Manage NetApp E-Series storage array's remote server certificates.
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ certificates:
+ description:
+ - List of certificate files
+ - Each item must include the path to the file
+ type: list
+ required: false
+notes:
+ - Set I(ssid=="0") or I(ssid=="proxy") to specifically reference SANtricity Web Services Proxy.
+requirements:
+ - cryptography
+"""
+EXAMPLES = """
+- name: Upload certificates
+ na_santricity_client_certificate:
+ ssid: 1
+ api_url: https://192.168.1.100:8443/devmgr/v2
+ api_username: admin
+ api_password: adminpass
+ certificates: ["/path/to/certificates.crt", "/path/to/another_certificate.crt"]
+- name: Remove all certificates
+ na_santricity_client_certificate:
+ ssid: 1
+ api_url: https://192.168.1.100:8443/devmgr/v2
+ api_username: admin
+ api_password: adminpass
+"""
+RETURN = """
+changed:
+ description: Whether changes have been made.
+ type: bool
+ returned: always
+ sample: true
+add_certificates:
+ description: Any SSL certificates that were added.
+ type: list
+ returned: always
+  sample: ["added_certificate.crt"]
+removed_certificates:
+ description: Any SSL certificates that were removed.
+ type: list
+ returned: always
+  sample: ["removed_certificate.crt"]
+"""
+
+import binascii
+import os
+import re
+
+from datetime import datetime
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata
+from ansible.module_utils._text import to_native
+
+try:
+ from cryptography import x509
+ from cryptography.hazmat.backends import default_backend
+except ImportError:
+ HAS_CRYPTOGRAPHY = False
+else:
+ HAS_CRYPTOGRAPHY = True
+
+
+class NetAppESeriesClientCertificate(NetAppESeriesModule):
+ def __init__(self):
+ ansible_options = dict(certificates=dict(type="list", required=False))
+
+ super(NetAppESeriesClientCertificate, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.certificates = args["certificates"] if args["certificates"] else []
+
+ # Check whether request needs to be forwarded on to the controller web services rest api.
+ self.url_path_prefix = ""
+ if self.is_proxy() and self.ssid != "0" and self.ssid != "PROXY":
+ self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid
+
+ self.remove_certificates = list()
+ self.add_certificates = list()
+ self.certificate_fingerprint_cache = None
+ self.certificate_info_cache = None
+
+ def certificate_info(self, path):
+ """Determine the pertinent certificate information: alias, subjectDN, issuerDN, start and expire.
+
+ Note: Use only when certificate/remote-server endpoints do not exist. Used to identify certificates through
+ the sslconfig/ca endpoint.
+ """
+ certificate = None
+ with open(path, "rb") as fh:
+ data = fh.read()
+ try:
+ certificate = x509.load_pem_x509_certificate(data, default_backend())
+ except Exception as error:
+ try:
+ certificate = x509.load_der_x509_certificate(data, default_backend())
+ except Exception as error:
+ self.module.fail_json(msg="Failed to load certificate. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ if not isinstance(certificate, x509.Certificate):
+ self.module.fail_json(msg="Failed to open certificate file or invalid certificate object type. Array [%s]." % self.ssid)
+
+ return dict(start_date=certificate.not_valid_before,
+ expire_date=certificate.not_valid_after,
+ subject_dn=[attr.value for attr in certificate.subject],
+ issuer_dn=[attr.value for attr in certificate.issuer])
+
+ def certificate_fingerprint(self, path):
+ """Load x509 certificate that is either encoded DER or PEM encoding and return the certificate fingerprint."""
+ certificate = None
+ with open(path, "rb") as fh:
+ data = fh.read()
+ try:
+ certificate = x509.load_pem_x509_certificate(data, default_backend())
+ except Exception as error:
+ try:
+ certificate = x509.load_der_x509_certificate(data, default_backend())
+ except Exception as error:
+ self.module.fail_json(msg="Failed to determine certificate fingerprint. File [%s]. Array [%s]. Error [%s]."
+ % (path, self.ssid, to_native(error)))
+
+ return binascii.hexlify(certificate.fingerprint(certificate.signature_hash_algorithm)).decode("utf-8")
+
+ def determine_changes(self):
+ """Search for remote server certificate that goes by the alias or has a matching fingerprint."""
+ rc, current_certificates = self.request(self.url_path_prefix + "certificates/remote-server", ignore_errors=True)
+
+ if rc == 404: # system down or endpoint does not exist
+ rc, current_certificates = self.request(self.url_path_prefix + "sslconfig/ca?useTruststore=true", ignore_errors=True)
+
+ if rc > 299:
+ self.module.fail_json(msg="Failed to retrieve remote server certificates. Array [%s]." % self.ssid)
+
+ user_installed_certificates = [certificate for certificate in current_certificates if certificate["isUserInstalled"]]
+ existing_certificates = []
+
+ for path in self.certificates:
+ for current_certificate in user_installed_certificates:
+ info = self.certificate_info(path)
+ tmp = dict(subject_dn=[re.sub(r".*=", "", item) for item in current_certificate["subjectDN"].split(", ")],
+ issuer_dn=[re.sub(r".*=", "", item) for item in current_certificate["issuerDN"].split(", ")],
+ start_date=datetime.strptime(current_certificate["start"].split(".")[0], "%Y-%m-%dT%H:%M:%S"),
+ expire_date=datetime.strptime(current_certificate["expire"].split(".")[0], "%Y-%m-%dT%H:%M:%S"))
+ if (all([attr in info["subject_dn"] for attr in tmp["subject_dn"]]) and
+ all([attr in info["issuer_dn"] for attr in tmp["issuer_dn"]]) and
+ tmp["start_date"] == info["start_date"] and
+ tmp["expire_date"] == info["expire_date"]):
+ existing_certificates.append(current_certificate)
+ break
+ else:
+ self.add_certificates.append(path)
+ self.remove_certificates = [certificate for certificate in user_installed_certificates if certificate not in existing_certificates]
+
+ elif rc > 299:
+ self.module.fail_json(msg="Failed to retrieve remote server certificates. Array [%s]." % self.ssid)
+
+ else:
+ user_installed_certificates = [certificate for certificate in current_certificates if certificate["isUserInstalled"]]
+ existing_certificates = []
+ for path in self.certificates:
+ for current_certificate in user_installed_certificates:
+ fingerprint = self.certificate_fingerprint(path)
+ if current_certificate["sha256Fingerprint"] == fingerprint or current_certificate["shaFingerprint"] == fingerprint:
+ existing_certificates.append(current_certificate)
+ break
+ else:
+ self.add_certificates.append(path)
+ self.remove_certificates = [certificate for certificate in user_installed_certificates if certificate not in existing_certificates]
+
+ def upload_certificate(self, path):
+ """Add or update remote server certificate to the storage array."""
+ file_name = os.path.basename(path)
+ headers, data = create_multipart_formdata(files=[("file", file_name, path)])
+
+ rc, resp = self.request(self.url_path_prefix + "certificates/remote-server", method="POST", headers=headers, data=data, ignore_errors=True)
+ if rc == 404:
+ rc, resp = self.request(self.url_path_prefix + "sslconfig/ca?useTruststore=true", method="POST", headers=headers, data=data, ignore_errors=True)
+
+ if rc > 299:
+ self.module.fail_json(msg="Failed to upload certificate. Array [%s]. Error [%s, %s]." % (self.ssid, rc, resp))
+
+ def delete_certificate(self, info):
+ """Delete existing remote server certificate in the storage array truststore."""
+ rc, resp = self.request(self.url_path_prefix + "certificates/remote-server/%s" % info["alias"], method="DELETE", ignore_errors=True)
+
+ if rc == 404:
+ rc, resp = self.request(self.url_path_prefix + "sslconfig/ca/%s?useTruststore=true" % info["alias"], method="DELETE", ignore_errors=True)
+
+ if rc > 204:
+ self.module.fail_json(msg="Failed to delete certificate. Alias [%s]. Array [%s]. Error [%s, %s]." % (info["alias"], self.ssid, rc, resp))
+
+ def apply(self):
+ """Apply state changes to the storage array's truststore."""
+ changed = False
+
+ self.determine_changes()
+ if self.remove_certificates or self.add_certificates:
+ changed = True
+
+ if changed and not self.module.check_mode:
+ for info in self.remove_certificates:
+ self.delete_certificate(info)
+
+ for path in self.add_certificates:
+ self.upload_certificate(path)
+
+ self.module.exit_json(changed=changed, removed_certificates=self.remove_certificates, add_certificates=self.add_certificates)
+
+
+def main():
+ client_certs = NetAppESeriesClientCertificate()
+ client_certs.apply()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_discover.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_discover.py
new file mode 100644
index 00000000..90e0649b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_discover.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_discover
+short_description: NetApp E-Series discover E-Series storage systems
+description: Module searches a subnet range and returns any available E-Series storage systems.
+author: Nathan Swartz (@ndswartz)
+options:
+ subnet_mask:
+ description:
+ - This is the IPv4 search range for discovering E-Series storage arrays.
+ - IPv4 subnet mask specified in CIDR form. Example 192.168.1.0/24 would search the range 192.168.1.0 to 192.168.1.255.
+ - Be sure to include all management paths in the search range.
+ type: str
+ required: true
+ ports:
+ description:
+ - This option specifies which ports to be tested during the discovery process.
+ - The first usable port will be used in the returned API url.
+ type: list
+ default: [8443]
+ required: false
+ proxy_url:
+ description:
+ - Web Services Proxy REST API URL. Example https://192.168.1.100:8443/devmgr/v2/
+ type: str
+ required: false
+ proxy_username:
+ description:
+ - Web Service Proxy username
+ type: str
+ required: false
+ proxy_password:
+ description:
+ - Web Service Proxy user password
+ type: str
+ required: false
+ proxy_validate_certs:
+ description:
+ - Whether to validate Web Service Proxy SSL certificate
+ type: bool
+ default: true
+ required: false
+ prefer_embedded:
+ description:
+ - Give preference to Web Services Embedded when an option exists for both Web Services Proxy and Embedded.
+ - Web Services Proxy will be utilized when available by default.
+ type: bool
+ default: false
+ required: false
+notes:
+ - Only available for platforms E2800 or later (SANtricity Web Services Embedded REST API must be available).
+ - All E-Series storage systems with SANtricity version 11.62 or later will be discovered.
+ - Only E-Series storage systems without a set admin password running SANtricity versions prior to 11.62 will be discovered.
+ - Use SANtricity Web Services Proxy to discover all systems regardless of SANricity version or password.
+requirements:
+ - ipaddress
+"""
+
+EXAMPLES = """
+- name: Discover all E-Series storage systems on the network.
+ na_santricity_discover:
+ subnet_mask: 192.168.1.0/24
+"""
+
+RETURN = """
+systems_found:
+ description: Success message
+ returned: on success
+ type: dict
+ sample: {"012341234123": {
+ "addresses": ["192.168.1.184", "192.168.1.185"],
+ "api_urls": ["https://192.168.1.184:8443/devmgr/v2/", "https://192.168.1.185:8443/devmgr/v2/"],
+ "label": "ExampleArray01",
+            "proxy_ssid": "",
+ "proxy_required": false},
+ "012341234567": {
+ "addresses": ["192.168.1.23", "192.168.1.24"],
+ "api_urls": ["https://192.168.1.100:8443/devmgr/v2/"],
+ "label": "ExampleArray02",
+ "proxy_ssid": "array_ssid",
+ "proxy_required": true}}
+"""
+
+import json
+import multiprocessing
+import threading
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import request
+from ansible.module_utils._text import to_native
+
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+
+try:
+ import ipaddress
+except ImportError:
+ HAS_IPADDRESS = False
+else:
+ HAS_IPADDRESS = True
+
+
+class NetAppESeriesDiscover:
+ """Discover E-Series storage systems."""
+ MAX_THREAD_POOL_SIZE = 256
+ CPU_THREAD_MULTIPLE = 32
+ SEARCH_TIMEOUT = 30
+ DEFAULT_CONNECTION_TIMEOUT_SEC = 30
+ DEFAULT_DISCOVERY_TIMEOUT_SEC = 300
+
+ def __init__(self):
+ ansible_options = dict(subnet_mask=dict(type="str", required=True),
+ ports=dict(type="list", required=False, default=[8443]),
+ proxy_url=dict(type="str", required=False),
+ proxy_username=dict(type="str", required=False),
+ proxy_password=dict(type="str", required=False, no_log=True),
+ proxy_validate_certs=dict(type="bool", default=True, required=False),
+ prefer_embedded=dict(type="bool", default=False, required=False))
+
+ required_together = [["proxy_url", "proxy_username", "proxy_password"]]
+ self.module = AnsibleModule(argument_spec=ansible_options, required_together=required_together)
+ args = self.module.params
+
+ self.subnet_mask = args["subnet_mask"]
+ self.prefer_embedded = args["prefer_embedded"]
+ self.ports = []
+ self.proxy_url = args["proxy_url"]
+ if args["proxy_url"]:
+ parsed_url = list(urlparse.urlparse(args["proxy_url"]))
+ parsed_url[2] = "/devmgr/utils/about"
+ self.proxy_about_url = urlparse.urlunparse(parsed_url)
+ parsed_url[2] = "/devmgr/v2/"
+ self.proxy_url = urlparse.urlunparse(parsed_url)
+ self.proxy_username = args["proxy_username"]
+ self.proxy_password = args["proxy_password"]
+ self.proxy_validate_certs = args["proxy_validate_certs"]
+
+ for port in args["ports"]:
+ if str(port).isdigit() and 0 < port < 2 ** 16:
+ self.ports.append(str(port))
+ else:
+ self.module.fail_json(msg="Invalid port! Ports must be positive numbers between 0 and 65536.")
+
+ self.systems_found = {}
+
+    def check_ip_address(self, systems_found, address):
+        """Determine where an E-Series storage system is available at a specific ip address.
+
+        Runs as a worker thread body. NOTE(review): systems_found is mutated concurrently by many
+        threads without a lock — assumed safe under CPython's atomic dict updates; confirm.
+        """
+        for port in self.ports:
+            # Port 8080 is served over plain HTTP; all other ports are assumed HTTPS.
+            if port == "8080":
+                url = "http://%s:%s/devmgr/v2/storage-systems/1/" % (address, port)
+            else:
+                url = "https://%s:%s/devmgr/v2/storage-systems/1/" % (address, port)
+            try:
+                rc, sa_data = request(url + "symbol/getSAData", validate_certs=False, force_basic_auth=False, ignore_errors=True)
+                if rc == 401:  # Unauthorized
+                    self.module.warn("Fail over and discover any storage system without a set admin password. This will discover systems without a set password"
+                                     " such as newly deployed storage systems. Address [%s]." % address)
+                    # Fail over and discover any storage system without a set admin password. This will cover newly deployed systems.
+                    rc, graph = request(url + "graph", validate_certs=False, url_username="admin", url_password="", timeout=self.SEARCH_TIMEOUT)
+                    sa_data = graph["sa"]["saData"]
+
+                # Systems are keyed on chassis serial so multiple management addresses collapse into one entry.
+                if sa_data["chassisSerialNumber"] in systems_found:
+                    systems_found[sa_data["chassisSerialNumber"]]["api_urls"].append(url)
+                else:
+                    systems_found.update({sa_data["chassisSerialNumber"]: {"api_urls": [url], "label": sa_data["storageArrayLabel"],
+                                                                           "addresses": [], "proxy_ssid": "", "proxy_required": False}})
+                break
+            except Exception as error:
+                # Best-effort probe: unreachable addresses/ports are expected and silently skipped.
+                pass
+
+ def no_proxy_discover(self):
+ """Discover E-Series storage systems using embedded web services."""
+ thread_pool_size = min(multiprocessing.cpu_count() * self.CPU_THREAD_MULTIPLE, self.MAX_THREAD_POOL_SIZE)
+ subnet = list(ipaddress.ip_network(u"%s" % self.subnet_mask))
+
+ thread_pool = []
+ search_count = len(subnet)
+ for start in range(0, search_count, thread_pool_size):
+ end = search_count if (search_count - start) < thread_pool_size else start + thread_pool_size
+
+ for address in subnet[start:end]:
+ thread = threading.Thread(target=self.check_ip_address, args=(self.systems_found, address))
+ thread_pool.append(thread)
+ thread.start()
+ for thread in thread_pool:
+ thread.join()
+
+ def verify_proxy_service(self):
+ """Verify proxy url points to a web services proxy."""
+ try:
+ rc, about = request(self.proxy_about_url, validate_certs=self.proxy_validate_certs)
+ if not about["runningAsProxy"]:
+ self.module.fail_json(msg="Web Services is not running as a proxy!")
+ except Exception as error:
+ self.module.fail_json(msg="Proxy is not available! Check proxy_url. Error [%s]." % to_native(error))
+
+ def test_systems_found(self, systems_found, serial, label, addresses):
+ """Verify and build api urls."""
+ api_urls = []
+ for address in addresses:
+ for port in self.ports:
+ if port == "8080":
+ url = "http://%s:%s/devmgr/" % (address, port)
+ else:
+ url = "https://%s:%s/devmgr/" % (address, port)
+
+ try:
+ rc, response = request(url + "utils/about", validate_certs=False, timeout=self.SEARCH_TIMEOUT)
+ api_urls.append(url + "v2/")
+ break
+ except Exception as error:
+ pass
+ systems_found.update({serial: {"api_urls": api_urls,
+ "label": label,
+ "addresses": addresses,
+ "proxy_ssid": "",
+ "proxy_required": False}})
+
    def proxy_discover(self):
        """Discover storage systems on the subnet via the web services proxy discovery API.

        Starts an asynchronous discovery scan on the proxy covering the whole subnet,
        polls until the scan completes (bounded by DEFAULT_DISCOVERY_TIMEOUT_SEC), then
        records each discovered system into self.systems_found. Systems with embedded
        web services (when prefer_embedded is set) are verified concurrently via
        test_systems_found; all others are recorded as proxy_required.
        """
        self.verify_proxy_service()
        subnet = ipaddress.ip_network(u"%s" % self.subnet_mask)

        try:
            # Kick off an asynchronous discovery scan on the proxy for the full address range.
            rc, request_id = request(self.proxy_url + "discovery", method="POST", validate_certs=self.proxy_validate_certs,
                                     force_basic_auth=True, url_username=self.proxy_username, url_password=self.proxy_password,
                                     data=json.dumps({"startIP": str(subnet[0]), "endIP": str(subnet[-1]),
                                                      "connectionTimeout": self.DEFAULT_CONNECTION_TIMEOUT_SEC}))

            # Wait for discover to complete
            try:
                # Poll once per second until the proxy reports the scan finished.
                for iteration in range(self.DEFAULT_DISCOVERY_TIMEOUT_SEC):
                    rc, discovered_systems = request(self.proxy_url + "discovery?requestId=%s" % request_id["requestId"],
                                                     validate_certs=self.proxy_validate_certs,
                                                     force_basic_auth=True, url_username=self.proxy_username, url_password=self.proxy_password)
                    if not discovered_systems["discoverProcessRunning"]:
                        thread_pool = []
                        for discovered_system in discovered_systems["storageSystems"]:
                            # Collect every management address reported for the system's controllers.
                            addresses = []
                            for controller in discovered_system["controllers"]:
                                addresses.extend(controller["ipAddresses"])

                            # Storage systems with embedded web services.
                            if "https" in discovered_system["supportedManagementPorts"] and self.prefer_embedded:

                                # Verify embedded API reachability in parallel; test_systems_found
                                # records the system into self.systems_found itself.
                                thread = threading.Thread(target=self.test_systems_found,
                                                          args=(self.systems_found, discovered_system["serialNumber"], discovered_system["label"], addresses))
                                thread_pool.append(thread)
                                thread.start()

                            # Storage systems without embedded web services.
                            else:
                                self.systems_found.update({discovered_system["serialNumber"]: {"api_urls": [self.proxy_url],
                                                                                               "label": discovered_system["label"],
                                                                                               "addresses": addresses,
                                                                                               "proxy_ssid": "",
                                                                                               "proxy_required": True}})
                        for thread in thread_pool:
                            thread.join()
                        break
                    sleep(1)
                else:
                    # for-else: loop exhausted without the scan ever finishing.
                    self.module.fail_json(msg="Timeout waiting for array discovery process. Subnet [%s]" % self.subnet_mask)
            except Exception as error:
                self.module.fail_json(msg="Failed to get the discovery results. Error [%s]." % to_native(error))
        except Exception as error:
            self.module.fail_json(msg="Failed to initiate array discovery. Error [%s]." % to_native(error))
+
+ def update_proxy_with_proxy_ssid(self):
+ """Determine the current proxy ssid for all discovered-proxy_required storage systems."""
+ # Discover all added storage systems to the proxy.
+ systems = []
+ try:
+ rc, systems = request(self.proxy_url + "storage-systems", validate_certs=self.proxy_validate_certs,
+ force_basic_auth=True, url_username=self.proxy_username, url_password=self.proxy_password)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to ascertain storage systems added to Web Services Proxy.")
+
+ for system_key, system_info in self.systems_found.items():
+ if self.systems_found[system_key]["proxy_required"]:
+ for system in systems:
+ if system_key == system["chassisSerialNumber"]:
+ self.systems_found[system_key]["proxy_ssid"] = system["id"]
+
+ def discover(self):
+ """Discover E-Series storage systems."""
+ if self.proxy_url:
+ self.proxy_discover()
+ self.update_proxy_with_proxy_ssid()
+ else:
+ self.no_proxy_discover()
+
+ self.module.exit_json(msg="Discover process complete.", systems_found=self.systems_found, changed=False)
+
+
def main():
    """Module entry point: run the discovery process and exit via AnsibleModule."""
    discover = NetAppESeriesDiscover()
    discover.discover()


if __name__ == "__main__":
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_drive_firmware.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_drive_firmware.py
new file mode 100644
index 00000000..cbde9395
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_drive_firmware.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_drive_firmware
+short_description: NetApp E-Series manage drive firmware
+description:
+ - Ensure drive firmware version is activated on specified drive model.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ firmware:
+ description:
+ - list of drive firmware file paths.
+ - NetApp E-Series drives require special firmware which can be downloaded from https://mysupport.netapp.com/NOW/download/tools/diskfw_eseries/
+ type: list
+ required: True
+ wait_for_completion:
+ description:
+ - This flag will cause module to wait for any upgrade actions to complete.
+ type: bool
+ default: false
+ ignore_inaccessible_drives:
+ description:
+ - This flag will determine whether drive firmware upgrade should fail if any affected drives are inaccessible.
+ type: bool
+ default: false
+ upgrade_drives_online:
+ description:
+      - This flag will determine whether drive firmware can be upgraded while drives are accepting I/O.
+ - When I(upgrade_drives_online==False) stop all I/O before running task.
+ type: bool
+ default: true
+"""
+EXAMPLES = """
+- name: Ensure correct firmware versions
+ na_santricity_drive_firmware:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ firmware: "path/to/drive_firmware"
+ wait_for_completion: true
+ ignore_inaccessible_drives: false
+"""
+RETURN = """
+msg:
+ description: Whether any drive firmware was upgraded and whether it is in progress.
+ type: str
+ returned: always
+ sample:
+ { changed: True, upgrade_in_process: True }
+"""
+import os
+import re
+
+from time import sleep
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata, request
+from ansible.module_utils._text import to_native
+
+
class NetAppESeriesDriveFirmware(NetAppESeriesModule):
    """Ansible module implementation: ensure drive firmware files are uploaded and activated on compatible drives."""

    # Maximum time to wait for an in-progress firmware upgrade (15 minutes, polled every 5 seconds).
    WAIT_TIMEOUT_SEC = 60 * 15

    def __init__(self):
        ansible_options = dict(
            firmware=dict(type="list", required=True),
            wait_for_completion=dict(type="bool", default=False),
            ignore_inaccessible_drives=dict(type="bool", default=False),
            upgrade_drives_online=dict(type="bool", default=True))

        super(NetAppESeriesDriveFirmware, self).__init__(ansible_options=ansible_options,
                                                         web_services_version="02.00.0000.0000",
                                                         supports_check_mode=True)

        args = self.module.params
        self.firmware_list = args["firmware"]  # local paths of firmware files to upload
        self.wait_for_completion = args["wait_for_completion"]
        self.ignore_inaccessible_drives = args["ignore_inaccessible_drives"]
        self.upgrade_drives_online = args["upgrade_drives_online"]

        # Memoized result of upgrade_list(); None until the first call.
        self.upgrade_list_cache = None

        # NOTE(review): upgrade_required_cache and drive_info_cache appear unused within this module.
        self.upgrade_required_cache = None
        self.upgrade_in_progress = False
        self.drive_info_cache = None

    def upload_firmware(self):
        """Upload each drive firmware file to the storage system as multipart form data."""
        for firmware in self.firmware_list:
            firmware_name = os.path.basename(firmware)
            files = [("file", firmware_name, firmware)]
            headers, data = create_multipart_formdata(files)
            try:
                rc, response = self.request("/files/drive", method="POST", headers=headers, data=data)
            except Exception as error:
                self.module.fail_json(msg="Failed to upload drive firmware [%s]. Array [%s]. Error [%s]." % (firmware_name, self.ssid, to_native(error)))

    def upgrade_list(self):
        """Build (and cache) the list of firmware files with the drive references requiring an upgrade.

        Returns a list of {"filename": ..., "driveRefList": [...]} dictionaries suitable as the
        request body for the initiate-upgrade endpoint. Only firmware files named in firmware_list
        are considered, and only drives whose current firmware differs from the uploaded version
        are included. Fails the module when an affected drive is not online-upgrade capable while
        upgrade_drives_online is requested.
        """
        if self.upgrade_list_cache is None:
            self.upgrade_list_cache = list()
            try:
                rc, response = self.request("storage-systems/%s/firmware/drives" % self.ssid)

                # Create upgrade list, this ensures only the firmware uploaded is applied
                for firmware in self.firmware_list:
                    filename = os.path.basename(firmware)

                    for uploaded_firmware in response["compatibilities"]:
                        if uploaded_firmware["filename"] == filename:

                            # Determine whether upgrade is required
                            drive_reference_list = []
                            for drive in uploaded_firmware["compatibleDrives"]:
                                try:
                                    rc, drive_info = self.request("storage-systems/%s/drives/%s" % (self.ssid, drive["driveRef"]))

                                    # Add drive references that are supported and differ from current firmware
                                    if (drive_info["firmwareVersion"] != uploaded_firmware["firmwareVersion"] and
                                            uploaded_firmware["firmwareVersion"] in uploaded_firmware["supportedFirmwareVersions"]):

                                        # Skip drives that are offline or unavailable unless explicitly ignored.
                                        if self.ignore_inaccessible_drives or (not drive_info["offline"] and drive_info["available"]):
                                            drive_reference_list.append(drive["driveRef"])

                                        if not drive["onlineUpgradeCapable"] and self.upgrade_drives_online:
                                            self.module.fail_json(msg="Drive is not capable of online upgrade. Array [%s]. Drive [%s]."
                                                                      % (self.ssid, drive["driveRef"]))

                                except Exception as error:
                                    self.module.fail_json(msg="Failed to retrieve drive information. Array [%s]. Drive [%s]. Error [%s]."
                                                              % (self.ssid, drive["driveRef"], to_native(error)))

                            if drive_reference_list:
                                self.upgrade_list_cache.extend([{"filename": filename, "driveRefList": drive_reference_list}])

            except Exception as error:
                self.module.fail_json(msg="Failed to complete compatibility and health check. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))

        return self.upgrade_list_cache

    def wait_for_upgrade_completion(self):
        """Poll the drive firmware upgrade state until all affected drives report okay, or time out."""
        drive_references = [reference for drive in self.upgrade_list() for reference in drive["driveRefList"]]
        last_status = None
        for attempt in range(int(self.WAIT_TIMEOUT_SEC / 5)):
            try:
                rc, response = self.request("storage-systems/%s/firmware/drives/state" % self.ssid)

                # Check drive status
                for status in response["driveStatus"]:
                    last_status = status
                    if status["driveRef"] in drive_references:
                        if status["status"] == "okay":
                            continue
                        # Still running: stop scanning, sleep, and poll again.
                        elif status["status"] in ["inProgress", "inProgressRecon", "pending", "notAttempted"]:
                            break
                        else:
                            self.module.fail_json(msg="Drive firmware upgrade failed. Array [%s]. Drive [%s]. Status [%s]."
                                                      % (self.ssid, status["driveRef"], status["status"]))
                else:
                    # for-else: no affected drive still in progress; the upgrade has finished.
                    self.upgrade_in_progress = False
                    break
            except Exception as error:
                self.module.fail_json(msg="Failed to retrieve drive status. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))

            sleep(5)
        else:
            # Outer for-else: polling exhausted without completion.
            self.module.fail_json(msg="Timed out waiting for drive firmware upgrade. Array [%s]. Status [%s]." % (self.ssid, last_status))

    def upgrade(self):
        """Initiate the drive firmware upgrade and optionally wait for it to finish."""
        try:
            rc, response = self.request("storage-systems/%s/firmware/drives/initiate-upgrade?onlineUpdate=%s"
                                        % (self.ssid, "true" if self.upgrade_drives_online else "false"), method="POST", data=self.upgrade_list())
            self.upgrade_in_progress = True
        except Exception as error:
            self.module.fail_json(msg="Failed to upgrade drive firmware. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))

        if self.wait_for_completion:
            self.wait_for_upgrade_completion()

    def apply(self):
        """Upload firmware, upgrade any drives that need it, and exit with the change status."""
        self.upload_firmware()

        # Check mode reports whether a change would occur without initiating the upgrade.
        if self.upgrade_list() and not self.module.check_mode:
            self.upgrade()

        self.module.exit_json(changed=True if self.upgrade_list() else False,
                              upgrade_in_process=self.upgrade_in_progress)
+
+
def main():
    """Module entry point: enforce the drive firmware policy and exit via AnsibleModule."""
    drive_firmware = NetAppESeriesDriveFirmware()
    drive_firmware.apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_facts.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_facts.py
new file mode 100644
index 00000000..55851692
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_facts.py
@@ -0,0 +1,918 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: na_santricity_facts
+short_description: NetApp E-Series retrieve facts about NetApp E-Series storage arrays
+description:
+ - The na_santricity_facts module returns a collection of facts regarding NetApp E-Series storage arrays.
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+'''
+
+EXAMPLES = """
+---
+- name: Get array facts
+ na_santricity_facts:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+"""
+
+RETURN = """
+ msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample:
+ - Gathered facts for storage array. Array ID [1].
+ - Gathered facts for web services proxy.
+ storage_array_facts:
+ description: provides details about the array, controllers, management interfaces, hostside interfaces,
+ driveside interfaces, disks, storage pools, volumes, snapshots, and features.
+    returned: on successful inquiry from embedded web services rest api
+ type: complex
+ contains:
+ netapp_controllers:
+ description: storage array controller list that contains basic controller identification and status
+ type: complex
+ sample:
+ - [{"name": "A", "serial": "021632007299", "status": "optimal"},
+ {"name": "B", "serial": "021632007300", "status": "failed"}]
+ netapp_disks:
+ description: drive list that contains identification, type, and status information for each drive
+ type: complex
+ sample:
+ - [{"available": false,
+ "firmware_version": "MS02",
+ "id": "01000000500003960C8B67880000000000000000",
+ "media_type": "ssd",
+ "product_id": "PX02SMU080 ",
+ "serial_number": "15R0A08LT2BA",
+ "status": "optimal",
+ "tray_ref": "0E00000000000000000000000000000000000000",
+ "usable_bytes": "799629205504" }]
+ netapp_driveside_interfaces:
+ description: drive side interface list that contains identification, type, and speed for each interface
+ type: complex
+ sample:
+ - [{ "controller": "A", "interface_speed": "12g", "interface_type": "sas" }]
+ - [{ "controller": "B", "interface_speed": "10g", "interface_type": "iscsi" }]
+ netapp_enabled_features:
+ description: specifies the enabled features on the storage array.
+ returned: on success
+ type: complex
+ sample:
+ - [ "flashReadCache", "performanceTier", "protectionInformation", "secureVolume" ]
+ netapp_host_groups:
+ description: specifies the host groups on the storage arrays.
+ returned: on success
+ type: complex
+ sample:
+ - [{ "id": "85000000600A098000A4B28D003610705C40B964", "name": "group1" }]
+ netapp_hosts:
+ description: specifies the hosts on the storage arrays.
+ returned: on success
+ type: complex
+ sample:
+ - [{ "id": "8203800000000000000000000000000000000000",
+ "name": "host1",
+ "group_id": "85000000600A098000A4B28D003610705C40B964",
+ "host_type_index": 28,
+ "ports": [{ "type": "fc", "address": "1000FF7CFFFFFF01", "label": "FC_1" },
+ { "type": "fc", "address": "1000FF7CFFFFFF00", "label": "FC_2" }]}]
+ netapp_host_types:
+ description: lists the available host types on the storage array.
+ returned: on success
+ type: complex
+ sample:
+ - [{ "index": 0, "type": "FactoryDefault" },
+ { "index": 1, "type": "W2KNETNCL"},
+ { "index": 2, "type": "SOL" },
+ { "index": 5, "type": "AVT_4M" },
+ { "index": 6, "type": "LNX" },
+ { "index": 7, "type": "LnxALUA" },
+ { "index": 8, "type": "W2KNETCL" },
+ { "index": 9, "type": "AIX MPIO" },
+ { "index": 10, "type": "VmwTPGSALUA" },
+ { "index": 15, "type": "HPXTPGS" },
+ { "index": 17, "type": "SolTPGSALUA" },
+ { "index": 18, "type": "SVC" },
+ { "index": 22, "type": "MacTPGSALUA" },
+ { "index": 23, "type": "WinTPGSALUA" },
+ { "index": 24, "type": "LnxTPGSALUA" },
+ { "index": 25, "type": "LnxTPGSALUA_PM" },
+ { "index": 26, "type": "ONTAP_ALUA" },
+ { "index": 27, "type": "LnxTPGSALUA_SF" },
+ { "index": 28, "type": "LnxDHALUA" },
+ { "index": 29, "type": "ATTOClusterAllOS" }]
+ netapp_hostside_interfaces:
+ description: host side interface list that contains identification, configuration, type, speed, and
+ status information for each interface
+ type: complex
+ sample:
+ - [{"iscsi":
+ [{ "controller": "A",
+ "current_interface_speed": "10g",
+ "ipv4_address": "10.10.10.1",
+ "ipv4_enabled": true,
+ "ipv4_gateway": "10.10.10.1",
+ "ipv4_subnet_mask": "255.255.255.0",
+ "ipv6_enabled": false,
+ "iqn": "iqn.1996-03.com.netapp:2806.600a098000a81b6d0000000059d60c76",
+ "link_status": "up",
+ "mtu": 9000,
+ "supported_interface_speeds": [ "10g" ] }]}]
+ netapp_management_interfaces:
+ description: management interface list that contains identification, configuration, and status for
+ each interface
+ type: complex
+ sample:
+ - [{"alias": "ict-2800-A",
+ "channel": 1,
+ "controller": "A",
+ "dns_config_method": "dhcp",
+ "dns_servers": [],
+ "ipv4_address": "10.1.1.1",
+ "ipv4_address_config_method": "static",
+ "ipv4_enabled": true,
+ "ipv4_gateway": "10.113.1.1",
+ "ipv4_subnet_mask": "255.255.255.0",
+ "ipv6_enabled": false,
+ "link_status": "up",
+ "mac_address": "00A098A81B5D",
+ "name": "wan0",
+ "ntp_config_method": "disabled",
+ "ntp_servers": [],
+ "remote_ssh_access": false }]
+ netapp_storage_array:
+ description: provides storage array identification, firmware version, and available capabilities
+ type: dict
+ sample:
+ - {"chassis_serial": "021540006043",
+ "firmware": "08.40.00.01",
+ "name": "ict-2800-11_40",
+ "wwn": "600A098000A81B5D0000000059D60C76",
+ "cacheBlockSizes": [4096,
+ 8192,
+ 16384,
+ 32768],
+ "supportedSegSizes": [8192,
+ 16384,
+ 32768,
+ 65536,
+ 131072,
+ 262144,
+ 524288]}
+ netapp_storage_pools:
+ description: storage pool list that contains identification and capacity information for each pool
+ type: complex
+ sample:
+ - [{"available_capacity": "3490353782784",
+ "id": "04000000600A098000A81B5D000002B45A953A61",
+ "name": "Raid6",
+ "total_capacity": "5399466745856",
+ "used_capacity": "1909112963072" }]
+ netapp_volumes:
+ description: storage volume list that contains identification and capacity information for each volume
+ type: complex
+ sample:
+ - [{"capacity": "5368709120",
+ "id": "02000000600A098000AAC0C3000002C45A952BAA",
+ "is_thin_provisioned": false,
+ "name": "5G",
+ "parent_storage_pool_id": "04000000600A098000A81B5D000002B45A953A61" }]
+ netapp_workload_tags:
+ description: workload tag list
+ type: complex
+ sample:
+ - [{"id": "87e19568-43fb-4d8d-99ea-2811daaa2b38",
+ "name": "ftp_server",
+ "workloadAttributes": [{"key": "use",
+ "value": "general"}]}]
+ netapp_volumes_by_initiators:
+ description: list of available volumes keyed by the mapped initiators.
+ type: complex
+ sample:
+ - {"beegfs_host": [{"id": "02000000600A098000A4B9D1000015FD5C8F7F9E",
+ "meta_data": {"filetype": "ext4", "public": true},
+ "name": "some_volume",
+ "workload_name": "beegfs_metadata",
+                 "workload_metadata": {"filetype": "ext4", "public": true},
+                 "volume_metadata": {"format_type": "ext4",
+ "format_options": "-i 2048 -I 512 -J size=400 -Odir_index,filetype",
+ "mount_options": "noatime,nodiratime,nobarrier,_netdev",
+ "mount_directory": "/data/beegfs/"},
+ "host_types": ["nvmeof"],
+ "eui": "0000139A3885FA4500A0980000EAA272V",
+ "wwn": "600A098000A4B9D1000015FD5C8F7F9E"}]}
+ snapshot_images:
+ description: snapshot image list that contains identification, capacity, and status information for each
+ snapshot image
+ type: complex
+ sample:
+ - [{"active_cow": true,
+ "creation_method": "user",
+ "id": "34000000600A098000A81B5D00630A965B0535AC",
+ "pit_capacity": "5368709120",
+ "reposity_cap_utilization": "0",
+ "rollback_source": false,
+ "status": "optimal" }]
+ proxy_facts:
+ description: proxy storage system list
+    returned: on successful inquiry from web services proxy's rest api
+ type: complex
+ contains:
+ ssid:
+ description: storage system id
+ type: str
+ sample: "ec8ed9d2-eba3-4cac-88fb-0954f327f1d4"
+ name:
+ description: storage system name
+ type: str
+ sample: "EF570-NVMe"
+ wwn:
+ description: storage system unique identifier
+ type: str
+ sample: "AC1100051E1E1E1E1E1E1E1E1E1E1E1E"
+ model:
+ description: NetApp E-Series model number
+ type: str
+ sample: "5700"
+ controller:
+ description: controller list that contains identification, ip addresses, and certificate information for
+ each controller
+ type: complex
+ sample: [{"certificateStatus": "selfSigned",
+ "controllerId": "070000000000000000000001",
+ "ipAddresses": ["172.17.0.5", "3.3.3.3"]}]
+ drive_types:
+ description: all available storage system drive types
+ type: list
+ sample: ["sas", "fibre"]
+ unconfigured_space:
+ description: unconfigured storage system space in bytes
+ type: str
+ sample: "982259020595200"
+ array_status:
+ description: storage system status
+ type: str
+ sample: "optimal"
+ password_status:
+ description: storage system password status
+ type: str
+ sample: "invalid"
+ certificate_status:
+ description: storage system ssl certificate status
+ type: str
+ sample: "untrusted"
+ firmware_version:
+ description: storage system install firmware version
+ type: str
+ sample: "08.50.42.99"
+ chassis_serial:
+ description: storage system chassis serial number
+ type: str
+ sample: "SX0810032"
+ asup_enabled:
+ description: storage system auto-support status
+ type: bool
+ sample: True
+"""
+
+import re
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+except ImportError:
+ ansible_version = 'unknown'
+
+try:
+ from urlparse import urlparse, urlunparse
+except ImportError:
+ from urllib.parse import urlparse, urlunparse
+
+
+class Facts(NetAppESeriesModule):
+ def __init__(self):
+ web_services_version = "02.00.0000.0000"
+ super(Facts, self).__init__(ansible_options={},
+ web_services_version=web_services_version,
+ supports_check_mode=True)
+
+ def get_controllers(self):
+ """Retrieve a mapping of controller references to their labels."""
+ controllers = list()
+ try:
+ rc, controllers = self.request('storage-systems/%s/graph/xpath-filter?query=/controller/id' % self.ssid)
+ except Exception as err:
+ self.module.fail_json(
+ msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
+ % (self.ssid, str(err)))
+
+ controllers.sort()
+
+ controllers_dict = {}
+ i = ord('A')
+ for controller in controllers:
+ label = chr(i)
+ controllers_dict[controller] = label
+ i += 1
+
+ return controllers_dict
+
+ def get_array_facts(self):
+ """Extract particular facts from the storage array graph"""
+ facts = dict(facts_from_proxy=(not self.is_embedded()), ssid=self.ssid)
+ controller_reference_label = self.get_controllers()
+ array_facts = None
+ hardware_inventory_facts = None
+
+ # Get the storage array graph
+ try:
+ rc, array_facts = self.request("storage-systems/%s/graph" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to obtain facts from storage array with id [%s]. Error [%s]" % (self.ssid, str(error)))
+
+ # Get the storage array hardware inventory
+ try:
+ rc, hardware_inventory_facts = self.request("storage-systems/%s/hardware-inventory" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to obtain hardware inventory from storage array with id [%s]. Error [%s]" % (self.ssid, str(error)))
+
+ facts['netapp_storage_array'] = dict(
+ name=array_facts['sa']['saData']['storageArrayLabel'],
+ chassis_serial=array_facts['sa']['saData']['chassisSerialNumber'],
+ firmware=array_facts['sa']['saData']['fwVersion'],
+ wwn=array_facts['sa']['saData']['saId']['worldWideName'],
+ segment_sizes=array_facts['sa']['featureParameters']['supportedSegSizes'],
+ cache_block_sizes=array_facts['sa']['featureParameters']['cacheBlockSizes'])
+
+ facts['netapp_controllers'] = [
+ dict(
+ name=controller_reference_label[controller['controllerRef']],
+ serial=controller['serialNumber'].strip(),
+ status=controller['status'],
+ ) for controller in array_facts['controller']]
+
+ facts['netapp_hosts'] = [
+ dict(
+ group_id=host['clusterRef'],
+ hosts_reference=host['hostRef'],
+ id=host['id'],
+ name=host['name'],
+ host_type_index=host['hostTypeIndex'],
+ ports=host['hostSidePorts']
+ ) for host in array_facts['storagePoolBundle']['host']]
+
+ facts['netapp_host_groups'] = [
+ dict(
+ id=group['id'],
+ name=group['name'],
+ hosts=[host['name'] for host in facts['netapp_hosts'] if host['group_id'] == group['id']]
+ ) for group in array_facts['storagePoolBundle']['cluster']]
+ facts['netapp_host_groups'].append(dict(
+ id='0000000000000000000000000000000000000000',
+ name='default_hostgroup',
+ hosts=[host["name"] for host in facts['netapp_hosts'] if host['group_id'] == '0000000000000000000000000000000000000000']))
+
+ facts['netapp_host_types'] = [
+ dict(
+ type=host_type['hostType'],
+ index=host_type['index']
+ ) for host_type in array_facts['sa']['hostSpecificVals']
+ if 'hostType' in host_type.keys() and host_type['hostType']
+ # This conditional ignores zero-length strings which indicates that the associated host-specific NVSRAM region has been cleared.
+ ]
+
+ facts['snapshot_images'] = [
+ dict(
+ id=snapshot['id'],
+ status=snapshot['status'],
+ pit_capacity=snapshot['pitCapacity'],
+ creation_method=snapshot['creationMethod'],
+ reposity_cap_utilization=snapshot['repositoryCapacityUtilization'],
+ active_cow=snapshot['activeCOW'],
+ rollback_source=snapshot['isRollbackSource']
+ ) for snapshot in array_facts['highLevelVolBundle']['pit']]
+
+ facts['netapp_disks'] = [
+ dict(
+ id=disk['id'],
+ available=disk['available'],
+ media_type=disk['driveMediaType'],
+ status=disk['status'],
+ usable_bytes=disk['usableCapacity'],
+ tray_ref=disk['physicalLocation']['trayRef'],
+ product_id=disk['productID'],
+ firmware_version=disk['firmwareVersion'],
+ serial_number=disk['serialNumber'].lstrip()
+ ) for disk in array_facts['drive']]
+
+ facts['netapp_management_interfaces'] = [
+ dict(controller=controller_reference_label[controller['controllerRef']],
+ name=iface['ethernet']['interfaceName'],
+ alias=iface['ethernet']['alias'],
+ channel=iface['ethernet']['channel'],
+ mac_address=iface['ethernet']['macAddr'],
+ remote_ssh_access=iface['ethernet']['rloginEnabled'],
+ link_status=iface['ethernet']['linkStatus'],
+ ipv4_enabled=iface['ethernet']['ipv4Enabled'],
+ ipv4_address_config_method=iface['ethernet']['ipv4AddressConfigMethod'].lower().replace("config", ""),
+ ipv4_address=iface['ethernet']['ipv4Address'],
+ ipv4_subnet_mask=iface['ethernet']['ipv4SubnetMask'],
+ ipv4_gateway=iface['ethernet']['ipv4GatewayAddress'],
+ ipv6_enabled=iface['ethernet']['ipv6Enabled'],
+ dns_config_method=iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsAcquisitionType'],
+ dns_servers=(iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers']
+ if iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers'] else []),
+ ntp_config_method=iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpAcquisitionType'],
+ ntp_servers=(iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers']
+ if iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers'] else [])
+ ) for controller in array_facts['controller'] for iface in controller['netInterfaces']]
+
+ facts['netapp_hostside_interfaces'] = [
+ dict(
+ fc=[dict(controller=controller_reference_label[controller['controllerRef']],
+ channel=iface['fibre']['channel'],
+ link_status=iface['fibre']['linkStatus'],
+ current_interface_speed=strip_interface_speed(iface['fibre']['currentInterfaceSpeed']),
+ maximum_interface_speed=strip_interface_speed(iface['fibre']['maximumInterfaceSpeed']))
+ for controller in array_facts['controller']
+ for iface in controller['hostInterfaces']
+ if iface['interfaceType'] == 'fc'],
+ ib=[dict(controller=controller_reference_label[controller['controllerRef']],
+ channel=iface['ib']['channel'],
+ link_status=iface['ib']['linkState'],
+ mtu=iface['ib']['maximumTransmissionUnit'],
+ current_interface_speed=strip_interface_speed(iface['ib']['currentSpeed']),
+ maximum_interface_speed=strip_interface_speed(iface['ib']['supportedSpeed']))
+ for controller in array_facts['controller']
+ for iface in controller['hostInterfaces']
+ if iface['interfaceType'] == 'ib'],
+ iscsi=[dict(controller=controller_reference_label[controller['controllerRef']],
+ iqn=iface['iscsi']['iqn'],
+ link_status=iface['iscsi']['interfaceData']['ethernetData']['linkStatus'],
+ ipv4_enabled=iface['iscsi']['ipv4Enabled'],
+ ipv4_address=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4Address'],
+ ipv4_subnet_mask=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4SubnetMask'],
+ ipv4_gateway=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4GatewayAddress'],
+ ipv6_enabled=iface['iscsi']['ipv6Enabled'],
+ mtu=iface['iscsi']['interfaceData']['ethernetData']['maximumFramePayloadSize'],
+ current_interface_speed=strip_interface_speed(iface['iscsi']['interfaceData']
+ ['ethernetData']['currentInterfaceSpeed']),
+ supported_interface_speeds=strip_interface_speed(iface['iscsi']['interfaceData']
+ ['ethernetData']
+ ['supportedInterfaceSpeeds']))
+ for controller in array_facts['controller']
+ for iface in controller['hostInterfaces']
+ if iface['interfaceType'] == 'iscsi' and iface['iscsi']['interfaceData']['type'] == 'ethernet'],
+ sas=[dict(controller=controller_reference_label[controller['controllerRef']],
+ channel=iface['sas']['channel'],
+ current_interface_speed=strip_interface_speed(iface['sas']['currentInterfaceSpeed']),
+ maximum_interface_speed=strip_interface_speed(iface['sas']['maximumInterfaceSpeed']),
+ link_status=iface['sas']['iocPort']['state'])
+ for controller in array_facts['controller']
+ for iface in controller['hostInterfaces']
+ if iface['interfaceType'] == 'sas'])]
+
+ facts['netapp_driveside_interfaces'] = [
+ dict(
+ controller=controller_reference_label[controller['controllerRef']],
+ interface_type=interface['interfaceType'],
+ interface_speed=strip_interface_speed(
+ interface[interface['interfaceType']]['maximumInterfaceSpeed']
+ if (interface['interfaceType'] == 'sata' or
+ interface['interfaceType'] == 'sas' or
+ interface['interfaceType'] == 'fibre')
+ else (
+ interface[interface['interfaceType']]['currentSpeed']
+ if interface['interfaceType'] == 'ib'
+ else (
+ interface[interface['interfaceType']]['interfaceData']['maximumInterfaceSpeed']
+ if interface['interfaceType'] == 'iscsi' else 'unknown'
+ ))),
+ )
+ for controller in array_facts['controller']
+ for interface in controller['driveInterfaces']]
+
+ facts['netapp_storage_pools'] = [
+ dict(
+ id=storage_pool['id'],
+ name=storage_pool['name'],
+ available_capacity=storage_pool['freeSpace'],
+ total_capacity=storage_pool['totalRaidedSpace'],
+ used_capacity=storage_pool['usedSpace']
+ ) for storage_pool in array_facts['volumeGroup']]
+
+ all_volumes = list(array_facts['volume'])
+
+ facts['netapp_volumes'] = [
+ dict(
+ id=v['id'],
+ name=v['name'],
+ parent_storage_pool_id=v['volumeGroupRef'],
+ capacity=v['capacity'],
+ is_thin_provisioned=v['thinProvisioned'],
+ workload=v['metadata'],
+
+ ) for v in all_volumes]
+
+ lun_mappings = dict()
+ for host in facts['netapp_hosts']:
+ lun_mappings.update({host["name"]: []})
+ for host in facts['netapp_host_groups']:
+ lun_mappings.update({host["name"]: []})
+
+ facts['netapp_default_hostgroup_access_volume_lun'] = None
+ for lun in [a['lun'] for a in array_facts['storagePoolBundle']['lunMapping']
+ if a['type'] == 'all' and a['mapRef'] == '0000000000000000000000000000000000000000']:
+ facts['netapp_default_hostgroup_access_volume_lun'] = lun
+
+ # Get all host mappings
+ host_mappings = dict()
+ for host_mapping in [h for h in array_facts['storagePoolBundle']['lunMapping'] if h['type'] == 'host']:
+ for host_name in [h['name'] for h in facts['netapp_hosts'] if h['id'] == host_mapping['mapRef']]:
+ for volume in [v['name'] for v in facts['netapp_volumes'] if v['id'] == host_mapping['volumeRef']]:
+ if host_name in host_mappings.keys():
+ host_mappings[host_name].append((volume, host_mapping['lun']))
+ else:
+ host_mappings[host_name] = [(volume, host_mapping['lun'])]
+
+ # Get all host group mappings
+ group_mappings = dict()
+ for group_mapping in [h for h in array_facts['storagePoolBundle']['lunMapping'] if h['type'] == 'cluster']:
+ for group_name, group_hosts in [(g['name'], g['hosts']) for g in facts['netapp_host_groups'] if g['id'] == group_mapping['mapRef']]:
+ for volume in [v['name'] for v in facts['netapp_volumes'] if v['id'] == group_mapping['volumeRef']]:
+ if group_name in group_mappings.keys():
+ group_mappings[group_name].append((volume, group_mapping['lun']))
+ else:
+ group_mappings[group_name] = [(volume, group_mapping['lun'])]
+
+ for host_name in [h for h in group_hosts if h in host_mappings.keys()]:
+ if host_name in host_mappings.keys():
+ host_mappings[host_name].append((volume, group_mapping['lun']))
+ else:
+ host_mappings[host_name] = [(volume, group_mapping['lun'])]
+
+ facts['netapp_luns_by_target'] = lun_mappings
+ if host_mappings:
+ facts['netapp_luns_by_target'].update(host_mappings)
+ if group_mappings:
+ facts['netapp_luns_by_target'].update(group_mappings)
+
+ # Add all host mappings to respective groups mappings
+ for host_group in facts['netapp_host_groups']:
+ group_name = host_group['name']
+ for host in host_group['hosts']:
+ facts['netapp_luns_by_target'][group_name].extend(facts['netapp_luns_by_target'][host])
+
+ # Remove duplicate entries
+ for obj in facts['netapp_luns_by_target'].keys():
+ tmp = dict(facts['netapp_luns_by_target'][obj])
+ facts['netapp_luns_by_target'][obj] = [(k, tmp[k]) for k in tmp.keys()]
+
+ workload_tags = None
+ try:
+ rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve workload tags. Array [%s]." % self.ssid)
+
+ facts['netapp_workload_tags'] = [
+ dict(
+ id=workload_tag['id'],
+ name=workload_tag['name'],
+ attributes=workload_tag['workloadAttributes']
+ ) for workload_tag in workload_tags]
+
+ targets = array_facts["storagePoolBundle"]["target"]
+
+ facts['netapp_hostside_io_interfaces'] = []
+ if "ioInterface" in array_facts:
+ for interface in array_facts["ioInterface"]:
+
+ # Select only the host side channels
+ if interface["channelType"] == "hostside":
+ interface_type = interface["ioInterfaceTypeData"]["interfaceType"]
+ interface_data = interface["ioInterfaceTypeData"]["fibre" if interface_type == "fc" else interface_type]
+ command_protocol_properties = interface["commandProtocolPropertiesList"]["commandProtocolProperties"]
+
+ # Build generic information for each interface entry
+ interface_info = {"protocol": "unknown",
+ "interface_reference": interface_data["interfaceRef"],
+ "controller_reference": interface["controllerRef"],
+ "channel_port_reference": interface_data["channelPortRef"] if "channelPortRef" in interface_data else "",
+ "controller": controller_reference_label[interface["controllerRef"]],
+ "channel": interface_data["channel"],
+ "part": "unknown",
+ "link_status": "unknown",
+ "speed": {"current": "unknown", "maximum": "unknown", "supported": []},
+ "mtu": None,
+ "guid": None,
+ "lid": None,
+ "nqn": None,
+ "iqn": None,
+ "wwpn": None,
+ "ipv4": None, # enabled, config_method, address, subnet, gateway
+ "ipv6": None} # for expansion if needed
+
+ # Add target information
+ for target in targets:
+ if target["nodeName"]["ioInterfaceType"] == "nvmeof":
+ interface_info.update({"nqn": target["nodeName"]["nvmeNodeName"]})
+ if target["nodeName"]["ioInterfaceType"] == "iscsi":
+ interface_info.update({"iqn": target["nodeName"]["iscsiNodeName"]})
+
+ # iSCSI IO interface
+ if interface_type == "iscsi":
+ interface_info.update({"ipv4": {"enabled": interface_data["ipv4Enabled"],
+ "config_method": interface_data["ipv4Data"]["ipv4AddressConfigMethod"],
+ "address": interface_data["ipv4Data"]["ipv4AddressData"]["ipv4Address"],
+ "subnet": interface_data["ipv4Data"]["ipv4AddressData"]["ipv4SubnetMask"],
+ "gateway": interface_data["ipv4Data"]["ipv4AddressData"]["ipv4GatewayAddress"]}})
+
+ # InfiniBand (iSER) protocol
+ if interface_data["interfaceData"]["type"] == "infiniband" and interface_data["interfaceData"]["infinibandData"]["isIser"]:
+ interface_info.update({"protocol": "ib_iser"})
+
+ # Get more details from hardware-inventory
+ for ib_port in hardware_inventory_facts["ibPorts"]:
+ if ib_port["channelPortRef"] == interface_info["channel_port_reference"]:
+ interface_info.update({"link_status": ib_port["linkState"],
+ "guid": ib_port["globalIdentifier"],
+ "lid": ib_port["localIdentifier"],
+ "speed": {"current": strip_interface_speed(ib_port["currentSpeed"]),
+ "maximum": strip_interface_speed(ib_port["supportedSpeed"])[-1],
+ "supported": strip_interface_speed(ib_port["supportedSpeed"])}})
+
+
+ # iSCSI protocol
+ elif interface_data["interfaceData"]["type"] == "ethernet":
+ ethernet_data = interface_data["interfaceData"]["ethernetData"]
+ interface_info.update({"protocol": "iscsi"})
+ interface_info.update({"part": "%s,%s" % (ethernet_data["partData"]["vendorName"], ethernet_data["partData"]["partNumber"]),
+ "link_status": ethernet_data["linkStatus"],
+ "mtu": ethernet_data["maximumFramePayloadSize"],
+ "speed": {"current": strip_interface_speed(ethernet_data["currentInterfaceSpeed"]),
+ "maximum": strip_interface_speed(ethernet_data["maximumInterfaceSpeed"]),
+ "supported": strip_interface_speed(ethernet_data["supportedInterfaceSpeeds"])}})
+
+ # Fibre Channel IO interface
+ elif interface_type == "fc":
+ interface_info.update({"wwpn": interface_data["addressId"],
+ "part": interface_data["part"],
+ "link_status": interface_data["linkStatus"],
+ "speed": {"current": strip_interface_speed(interface_data["currentInterfaceSpeed"]),
+ "maximum": strip_interface_speed(interface_data["maximumInterfaceSpeed"]),
+ "supported": "unknown"}})
+
+ # NVMe over fibre channel protocol
+ if (command_protocol_properties and command_protocol_properties[0]["commandProtocol"] == "nvme" and
+ command_protocol_properties[0]["nvmeProperties"]["commandSet"] == "nvmeof" and
+ command_protocol_properties[0]["nvmeProperties"]["nvmeofProperties"]["fcProperties"]):
+ interface_info.update({"protocol": "nvme_fc"})
+
+ # Fibre channel protocol
+ else:
+ interface_info.update({"protocol": "fc"})
+
+ # SAS IO interface
+ elif interface_type == "sas":
+ interface_info.update({"protocol": "sas",
+ "wwpn": interface_data["addressId"],
+ "part": interface_data["part"],
+ "speed": {"current": strip_interface_speed(interface_data["currentInterfaceSpeed"]),
+ "maximum": strip_interface_speed(interface_data["maximumInterfaceSpeed"]),
+ "supported": "unknown"}})
+
+ # Infiniband IO interface
+ elif interface_type == "ib":
+ interface_info.update({"link_status": interface_data["linkState"],
+ "speed": {"current": strip_interface_speed(interface_data["currentSpeed"]),
+ "maximum": strip_interface_speed(interface_data["supportedSpeed"])[-1],
+ "supported": strip_interface_speed(interface_data["supportedSpeed"])},
+ "mtu": interface_data["maximumTransmissionUnit"],
+ "guid": interface_data["globalIdentifier"],
+ "lid": interface_data["localIdentifier"]})
+
+ # Determine protocol (NVMe over Infiniband, InfiniBand iSER, InfiniBand SRP)
+ if interface_data["isNVMeSupported"]:
+ interface_info.update({"protocol": "nvme_ib"})
+ elif interface_data["isISERSupported"]:
+ interface_info.update({"protocol": "ib_iser"})
+ elif interface_data["isSRPSupported"]:
+ interface_info.update({"protocol": "ib_srp"})
+
+ # Determine command protocol information
+ if command_protocol_properties:
+ for command_protocol_property in command_protocol_properties:
+ if command_protocol_property["commandProtocol"] == "nvme":
+ if command_protocol_property["nvmeProperties"]["commandSet"] == "nvmeof":
+ ip_address_data = command_protocol_property["nvmeProperties"]["nvmeofProperties"]["ibProperties"]["ipAddressData"]
+ if ip_address_data["addressType"] == "ipv4":
+ interface_info.update({"ipv4": {"enabled": True,
+ "config_method": "configStatic",
+ "address": ip_address_data["ipv4Data"]["ipv4Address"],
+ "subnet": ip_address_data["ipv4Data"]["ipv4SubnetMask"],
+ "gateway": ip_address_data["ipv4Data"]["ipv4GatewayAddress"]}})
+
+ elif command_protocol_property["commandProtocol"] == "scsi":
+ if command_protocol_property["scsiProperties"]["scsiProtocolType"] == "iser":
+ ipv4_data = command_protocol_property["scsiProperties"]["iserProperties"]["ipv4Data"]
+ interface_info.update({"ipv4": {"enabled": True,
+ "config_method": ipv4_data["ipv4AddressConfigMethod"],
+ "address": ipv4_data["ipv4AddressData"]["ipv4Address"],
+ "subnet": ipv4_data["ipv4AddressData"]["ipv4SubnetMask"],
+ "gateway": ipv4_data["ipv4AddressData"]["ipv4GatewayAddress"]}})
+
+ # Ethernet IO interface
+ elif interface_type == "ethernet":
+ ethernet_data = interface_data["interfaceData"]["ethernetData"]
+ interface_info.update({"part": "%s,%s" % (ethernet_data["partData"]["vendorName"], ethernet_data["partData"]["partNumber"]),
+ "link_status": ethernet_data["linkStatus"],
+ "mtu": ethernet_data["maximumFramePayloadSize"],
+ "speed": {"current": strip_interface_speed(ethernet_data["currentInterfaceSpeed"]),
+ "maximum": strip_interface_speed(ethernet_data["maximumInterfaceSpeed"]),
+ "supported": strip_interface_speed(ethernet_data["supportedInterfaceSpeeds"])}})
+
+ # Determine command protocol information
+ if command_protocol_properties:
+ for command_protocol_property in command_protocol_properties:
+ if command_protocol_property["commandProtocol"] == "nvme":
+ if command_protocol_property["nvmeProperties"]["commandSet"] == "nvmeof":
+
+ nvmeof_properties = command_protocol_property["nvmeProperties"]["nvmeofProperties"]
+ if nvmeof_properties["provider"] == "providerRocev2":
+ ipv4_data = nvmeof_properties["roceV2Properties"]["ipv4Data"]
+ interface_info.update({"protocol": "nvme_roce"})
+ interface_info.update({"ipv4": {"enabled": nvmeof_properties["roceV2Properties"]["ipv4Enabled"],
+ "config_method": ipv4_data["ipv4AddressConfigMethod"],
+ "address": ipv4_data["ipv4AddressData"]["ipv4Address"],
+ "subnet": ipv4_data["ipv4AddressData"]["ipv4SubnetMask"],
+ "gateway": ipv4_data["ipv4AddressData"]["ipv4GatewayAddress"]}})
+
+ facts['netapp_hostside_io_interfaces'].append(interface_info)
+
+ # Create a dictionary of volume lists keyed by host names
+ facts['netapp_volumes_by_initiators'] = dict()
+ for mapping in array_facts['storagePoolBundle']['lunMapping']:
+ for host in facts['netapp_hosts']:
+ if mapping['mapRef'] == host['hosts_reference'] or mapping['mapRef'] == host['group_id']:
+ if host['name'] not in facts['netapp_volumes_by_initiators'].keys():
+ facts['netapp_volumes_by_initiators'].update({host['name']: []})
+
+ # Determine host io interface protocols
+ host_types = [port['type'] for port in host['ports']]
+ hostside_io_interface_protocols = []
+ host_port_protocols = []
+ host_port_information = {}
+ for interface in facts['netapp_hostside_io_interfaces']:
+ hostside_io_interface_protocols.append(interface["protocol"])
+ for host_type in host_types:
+ if host_type == "iscsi" and interface["protocol"] in ["iscsi", "ib_iser"]:
+ host_port_protocols.append(interface["protocol"])
+ if interface["protocol"] in host_port_information:
+ host_port_information[interface["protocol"]].append(interface)
+ else:
+ host_port_information.update({interface["protocol"]: [interface]})
+ elif host_type == "fc" and interface["protocol"] in ["fc"]:
+ host_port_protocols.append(interface["protocol"])
+ if interface["protocol"] in host_port_information:
+ host_port_information[interface["protocol"]].append(interface)
+ else:
+ host_port_information.update({interface["protocol"]: [interface]})
+ elif host_type == "sas" and interface["protocol"] in ["sas"]:
+ host_port_protocols.append(interface["protocol"])
+ if interface["protocol"] in host_port_information:
+ host_port_information[interface["protocol"]].append(interface)
+ else:
+ host_port_information.update({interface["protocol"]: [interface]})
+ elif host_type == "ib" and interface["protocol"] in ["ib_iser", "ib_srp"]:
+ host_port_protocols.append(interface["protocol"])
+ if interface["protocol"] in host_port_information:
+ host_port_information[interface["protocol"]].append(interface)
+ else:
+ host_port_information.update({interface["protocol"]: [interface]})
+ elif host_type == "nvmeof" and interface["protocol"] in ["nvme_ib", "nvme_fc", "nvme_roce"]:
+ host_port_protocols.append(interface["protocol"])
+ if interface["protocol"] in host_port_information:
+ host_port_information[interface["protocol"]].append(interface)
+ else:
+ host_port_information.update({interface["protocol"]: [interface]})
+
+ for volume in all_volumes:
+
+ storage_pool = [pool["name"] for pool in facts['netapp_storage_pools'] if pool["id"] == volume["volumeGroupRef"]][0]
+
+ if mapping['id'] in [volume_mapping['id'] for volume_mapping in volume['listOfMappings']]:
+
+ # Determine workload name if there is one
+ workload_name = ""
+ metadata = dict()
+ for volume_tag in volume['metadata']:
+ if volume_tag['key'] == 'workloadId':
+ for workload_tag in facts['netapp_workload_tags']:
+ if volume_tag['value'] == workload_tag['id']:
+ workload_name = workload_tag['name']
+ metadata = dict((entry['key'], entry['value'])
+ for entry in workload_tag['attributes']
+ if entry['key'] != 'profileId')
+
+ # Get volume specific metadata tags
+ volume_metadata_raw = dict()
+ volume_metadata = dict()
+ for entry in volume['metadata']:
+ volume_metadata_raw.update({entry["key"]: entry["value"]})
+
+ for sorted_key in sorted(volume_metadata_raw.keys()):
+ if re.match(".*~[0-9]$", sorted_key):
+ key = re.sub("~[0-9]$", "", sorted_key)
+ if key in volume_metadata:
+ volume_metadata[key] = volume_metadata[key] + volume_metadata_raw[sorted_key]
+ else:
+ volume_metadata.update({key: volume_metadata_raw[sorted_key]})
+ else:
+ volume_metadata.update({sorted_key: volume_metadata_raw[sorted_key]})
+
+ # Determine drive count
+ stripe_count = 0
+ vg_drive_num = sum(1 for d in array_facts['drive'] if d['currentVolumeGroupRef'] == volume['volumeGroupRef'] and not d['hotSpare'])
+
+ if volume['raidLevel'] == "raidDiskPool":
+ stripe_count = 8
+ if volume['raidLevel'] == "raid0":
+ stripe_count = vg_drive_num
+ if volume['raidLevel'] == "raid1":
+ stripe_count = int(vg_drive_num / 2)
+ if volume['raidLevel'] in ["raid3", "raid5"]:
+ stripe_count = vg_drive_num - 1
+ if volume['raidLevel'] == "raid6":
+ stripe_count = vg_drive_num - 2
+ facts['netapp_volumes_by_initiators'][host['name']].append(
+ dict(name=volume['name'],
+ storage_pool=storage_pool,
+ host_types=set(host_types),
+ host_port_information=host_port_information,
+ host_port_protocols=set(host_port_protocols),
+ hostside_io_interface_protocols=set(hostside_io_interface_protocols),
+ id=volume['id'],
+ wwn=volume['wwn'],
+ eui=volume['extendedUniqueIdentifier'],
+ workload_name=workload_name,
+ workload_metadata=metadata,
+ meta_data=metadata,
+ volume_metadata=volume_metadata,
+ raid_level=volume['raidLevel'],
+ segment_size_kb=int(volume['segmentSize'] / 1024),
+ stripe_count=stripe_count))
+
+ features = [feature for feature in array_facts['sa']['capabilities']]
+ features.extend([feature['capability'] for feature in array_facts['sa']['premiumFeatures']
+ if feature['isEnabled']])
+ features = list(set(features)) # ensure unique
+ features.sort()
+ facts['netapp_enabled_features'] = features
+
+ return facts
+
+ def get_facts(self):
+ """Get the embedded or web services proxy information."""
+ facts = self.get_array_facts()
+
+ facts_from_proxy = not self.is_embedded()
+ facts.update({"facts_from_proxy": facts_from_proxy})
+
+ self.module.exit_json(msg="Gathered facts for storage array. Array ID: [%s]." % self.ssid,
+ storage_array_facts=facts)
+
+
def strip_interface_speed(speed):
    """Convert symbol interface speed(s) to a more common notation.

    Example: 'speed10gig' -> '10g'. Accepts a single symbol string or a list of
    them and returns the converted value(s) in kind. Symbols that do not match
    a numeric speed become 'auto' when they look like an auto-negotiation
    symbol, otherwise 'unknown'.

    Fixes two defects in the previous implementation: the 'auto' test was
    applied to the already-converted result (so 'auto' was unreachable), and
    the list branch silently dropped non-matching entries instead of reporting
    them as 'unknown' like the scalar branch does.
    """
    def _convert(symbol):
        # 'speed10gig' / 'speed100meg' -> '10g' / '100m'
        match = re.match(r"speed[0-9]{1,3}[gm]", symbol)
        if match:
            return match.group().replace("speed", "")
        # Report auto-negotiation symbols as 'auto' rather than dropping them.
        return "auto" if re.match(r"auto", symbol.lower()) else "unknown"

    if isinstance(speed, list):
        return [_convert(sp) for sp in speed]
    return _convert(speed)
+
+
def main():
    """Module entry point: gather and report storage-array facts."""
    Facts().get_facts()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_firmware.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_firmware.py
new file mode 100644
index 00000000..caea3d1d
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_firmware.py
@@ -0,0 +1,613 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_firmware
+short_description: NetApp E-Series manage firmware.
+description:
+ - Ensure specific firmware versions are activated on E-Series storage system.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ nvsram:
+ description:
+ - Path to the NVSRAM file.
+ - NetApp recommends upgrading the NVSRAM when upgrading firmware.
+ - Due to concurrency issues, use M(netapp_eseries.santricity.na_santricity_proxy_firmware_upload) to upload firmware and nvsram to SANtricity Web Services Proxy when
+ upgrading multiple systems at the same time on the same instance of the proxy.
+ type: str
+ required: false
+ firmware:
+ description:
+ - Path to the firmware file.
+ - Due to concurrency issues, use M(netapp_eseries.santricity.na_santricity_proxy_firmware_upload) to upload firmware and nvsram to SANtricity Web Services Proxy when
+ upgrading multiple systems at the same time on the same instance of the proxy.
+ type: str
+ required: True
+ wait_for_completion:
+ description:
+ - This flag will cause module to wait for any upgrade actions to complete.
+ - When changes are required to both firmware and nvsram and task is executed against SANtricity Web Services Proxy,
+ the firmware will have to complete before nvsram can be installed.
+ type: bool
+ default: false
+ clear_mel_events:
+ description:
+ - This flag will force firmware to be activated in spite of the storage system mel-event issues.
+ - Warning! This will clear all storage system mel-events. Use at your own risk!
+ type: bool
+ default: false
+"""
+EXAMPLES = """
+- name: Ensure correct firmware versions
+ na_santricity_firmware:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ nvsram: "path/to/nvsram"
+ firmware: "path/to/bundle"
+ wait_for_completion: true
+ clear_mel_events: true
+- name: Ensure correct firmware versions
+ na_santricity_firmware:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ nvsram: "path/to/nvsram"
+ firmware: "path/to/firmware"
+"""
+RETURN = """
+msg:
+ description: Status and version of firmware and NVSRAM.
+ type: str
+ returned: always
+ sample:
+"""
+import os
+import multiprocessing
+import threading
+
+from time import sleep
+from ansible.module_utils import six
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata, request
+from ansible.module_utils._text import to_native
+
+
class NetAppESeriesFirmware(NetAppESeriesModule):
    # Timeout (seconds) presumably applied to firmware compatibility checks — not used in this chunk; confirm against callers.
    COMPATIBILITY_CHECK_TIMEOUT_SEC = 60
    # Maximum seconds to wait for a controller reboot (30 minutes).
    REBOOT_TIMEOUT_SEC = 30 * 60
    # Oldest SANtricity Web Services Proxy version this module supports.
    MINIMUM_PROXY_VERSION = "04.10.00.0000"
+
+ def __init__(self):
+ ansible_options = dict(
+ nvsram=dict(type="str", required=False),
+ firmware=dict(type="str", required=True),
+ wait_for_completion=dict(type="bool", default=False),
+ clear_mel_events=dict(type="bool", default=False))
+
+ super(NetAppESeriesFirmware, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.nvsram = args["nvsram"]
+ self.firmware = args["firmware"]
+ self.wait_for_completion = args["wait_for_completion"]
+ self.clear_mel_events = args["clear_mel_events"]
+
+ self.nvsram_name = None
+ self.firmware_name = None
+ self.is_bundle_cache = None
+ self.firmware_version_cache = None
+ self.nvsram_version_cache = None
+ self.upgrade_required = False
+ self.upgrade_in_progress = False
+ self.module_info = dict()
+
+ if self.nvsram:
+ self.nvsram_name = os.path.basename(self.nvsram)
+ if self.firmware:
+ self.firmware_name = os.path.basename(self.firmware)
+
+ self.last_known_event = -1
+ self.is_firmware_activation_started_mel_event_count = 1
+ self.is_nvsram_download_completed_mel_event_count = 1
+ self.proxy_wait_for_upgrade_mel_event_count = 1
+
+ def is_upgrade_in_progress(self):
+ """Determine whether an upgrade is already in progress."""
+ in_progress = False
+
+ if self.is_proxy():
+ try:
+ rc, status = self.request("storage-systems/%s/cfw-upgrade" % self.ssid)
+ in_progress = status["running"]
+ except Exception as error:
+ if "errorMessage" in to_native(error):
+ self.module.warn("Failed to retrieve upgrade status. Array [%s]. Error [%s]." % (self.ssid, error))
+ in_progress = False
+ else:
+ self.module.fail_json(msg="Failed to retrieve upgrade status. Array [%s]. Error [%s]." % (self.ssid, error))
+ else:
+ in_progress = False
+
+ return in_progress
+
+ def is_firmware_bundled(self):
+ """Determine whether supplied firmware is bundle."""
+ if self.is_bundle_cache is None:
+ with open(self.firmware, "rb") as fh:
+ signature = fh.read(16).lower()
+
+ if b"firmware" in signature:
+ self.is_bundle_cache = False
+ elif b"combined_content" in signature:
+ self.is_bundle_cache = True
+ else:
+ self.module.fail_json(msg="Firmware file is invalid. File [%s]. Array [%s]" % (self.firmware, self.ssid))
+
+ return self.is_bundle_cache
+
+ def firmware_version(self):
+ """Retrieve firmware version of the firmware file. Return: bytes string"""
+ if self.firmware_version_cache is None:
+
+ # Search firmware file for bundle or firmware version
+ with open(self.firmware, "rb") as fh:
+ line = fh.readline()
+ while line:
+ if self.is_firmware_bundled():
+ if b'displayableAttributeList=' in line:
+ for item in line[25:].split(b','):
+ key, value = item.split(b"|")
+ if key == b'VERSION':
+ self.firmware_version_cache = value.strip(b"\n")
+ break
+ elif b"Version:" in line:
+ self.firmware_version_cache = line.split()[-1].strip(b"\n")
+ break
+ line = fh.readline()
+ else:
+ self.module.fail_json(msg="Failed to determine firmware version. File [%s]. Array [%s]." % (self.firmware, self.ssid))
+ return self.firmware_version_cache
+
+ def nvsram_version(self):
+ """Retrieve NVSRAM version of the NVSRAM file. Return: byte string"""
+ if self.nvsram_version_cache is None:
+
+ with open(self.nvsram, "rb") as fh:
+ line = fh.readline()
+ while line:
+ if b".NVSRAM Configuration Number" in line:
+ self.nvsram_version_cache = line.split(b'"')[-2]
+ break
+ line = fh.readline()
+ else:
+ self.module.fail_json(msg="Failed to determine NVSRAM file version. File [%s]. Array [%s]." % (self.nvsram, self.ssid))
+ return self.nvsram_version_cache
+
+ def check_system_health(self):
+ """Ensure E-Series storage system is healthy. Works for both embedded and proxy web services."""
+ try:
+ rc, response = self.request("storage-systems/%s/health-check" % self.ssid, method="POST")
+ return response["successful"]
+ except Exception as error:
+ self.module.fail_json(msg="Health check failed! Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ def embedded_check_compatibility(self):
+ """Verify files are compatible with E-Series storage system."""
+ if self.nvsram:
+ self.embedded_check_nvsram_compatibility()
+ if self.firmware:
+ self.embedded_check_bundle_compatibility()
+
+ def embedded_check_nvsram_compatibility(self):
+ """Verify the provided NVSRAM is compatible with E-Series storage system."""
+ files = [("nvsramimage", self.nvsram_name, self.nvsram)]
+ headers, data = create_multipart_formdata(files=files)
+ compatible = {}
+ try:
+ rc, compatible = self.request("firmware/embedded-firmware/%s/nvsram-compatibility-check" % self.ssid, method="POST", data=data, headers=headers)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve NVSRAM compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ if not compatible["signatureTestingPassed"]:
+ self.module.fail_json(msg="Invalid NVSRAM file. File [%s]." % self.nvsram)
+ if not compatible["fileCompatible"]:
+ self.module.fail_json(msg="Incompatible NVSRAM file. File [%s]." % self.nvsram)
+
+ # Determine whether nvsram is required
+ for module in compatible["versionContents"]:
+ if module["bundledVersion"] != module["onboardVersion"]:
+ self.upgrade_required = True
+
+ # Update bundle info
+ self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}})
+
    def embedded_check_bundle_compatibility(self):
        """Verify the provided firmware bundle is compatible with E-Series storage system.

        Uploads the first 8KB of the bundle for a compatibility check, fails the
        module on invalid/incompatible files or attempted downgrades, and records
        per-module version info in self.module_info.
        """
        files = [("files[]", "blob", self.firmware)]
        headers, data = create_multipart_formdata(files=files, send_8kb=True)
        compatible = {}
        try:
            rc, compatible = self.request("firmware/embedded-firmware/%s/bundle-compatibility-check" % self.ssid, method="POST", data=data, headers=headers)
        except Exception as error:
            self.module.fail_json(msg="Failed to retrieve bundle compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))

        # Determine whether valid and compatible firmware
        if not compatible["signatureTestingPassed"]:
            self.module.fail_json(msg="Invalid firmware bundle file. File [%s]." % self.firmware)
        if not compatible["fileCompatible"]:
            self.module.fail_json(msg="Incompatible firmware bundle file. File [%s]." % self.firmware)

        # Determine whether upgrade is required
        for module in compatible["versionContents"]:
            # Compare only the fields both version strings share.
            bundle_module_version = module["bundledVersion"].split(".")
            onboard_module_version = module["onboardVersion"].split(".")
            version_minimum_length = min(len(bundle_module_version), len(onboard_module_version))

            if bundle_module_version[:version_minimum_length] != onboard_module_version[:version_minimum_length]:
                self.upgrade_required = True

            # Check whether downgrade is being attempted
            # NOTE(review): these are lexicographic *string* comparisons; they are
            # only correct if version fields are zero-padded to equal width
            # (e.g. "08.40" vs "11.50") — confirm the firmware version format.
            bundle_version = module["bundledVersion"].split(".")[:2]
            onboard_version = module["onboardVersion"].split(".")[:2]
            if bundle_version[0] < onboard_version[0] or (bundle_version[0] == onboard_version[0] and bundle_version[1] < onboard_version[1]):
                self.module.fail_json(msg="Downgrades are not permitted. onboard [%s] > bundled[%s]."
                                          % (module["onboardVersion"], module["bundledVersion"]))

            # Update bundle info
            self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}})
+
    def embedded_firmware_activate(self):
        """Activate the previously staged firmware.

        Uses a short timeout and ignore_errors because activation reboots the
        controller, so the request is not expected to return normally.
        """
        rc, response = self.request("firmware/embedded-firmware/activate", method="POST", ignore_errors=True, timeout=10)
        # NOTE(review): rc is compared against the *string* "422"; if request()
        # returns the HTTP status code as an int this branch can never trigger —
        # confirm the return type of request() in module_utils/santricity.py.
        if rc == "422":
            self.module.fail_json(msg="Failed to activate the staged firmware. Array Id [%s]. Error [%s]" % (self.ssid, response))
+
+ def embedded_firmware_download(self):
+ """Execute the firmware download."""
+ if self.nvsram:
+ firmware_url = "firmware/embedded-firmware?nvsram=true&staged=true"
+ headers, data = create_multipart_formdata(files=[("nvsramfile", self.nvsram_name, self.nvsram),
+ ("dlpfile", self.firmware_name, self.firmware)])
+ else:
+ firmware_url = "firmware/embedded-firmware?nvsram=false&staged=true"
+ headers, data = create_multipart_formdata(files=[("dlpfile", self.firmware_name, self.firmware)])
+
+ # Stage firmware and nvsram
+ try:
+
+ rc, response = self.request(firmware_url, method="POST", data=data, headers=headers, timeout=(30 * 60))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to stage firmware. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ # Activate firmware
+ activate_thread = threading.Thread(target=self.embedded_firmware_activate)
+ activate_thread.start()
+ self.wait_for_reboot()
+
    def wait_for_reboot(self):
        """Wait for controller A to fully reboot and web services running.

        Polls controller A's ping endpoint: the first request failure marks the
        reboot as started; a subsequent "ok" response marks it completed.
        Skipped entirely unless the user asked to wait_for_completion.
        """
        reboot_started = False
        reboot_completed = False
        self.module.log("Controller firmware: Reboot commencing. Array Id [%s]." % self.ssid)
        while self.wait_for_completion and not (reboot_started and reboot_completed):
            try:
                rc, response = self.request("storage-systems/%s/symbol/pingController?controller=a&verboseErrorResponse=true"
                                            % self.ssid, method="POST", timeout=10, log_request=False)

                # A successful ping after the reboot began means it finished.
                if reboot_started and response == "ok":
                    self.module.log("Controller firmware: Reboot completed. Array Id [%s]." % self.ssid)
                    reboot_completed = True
                sleep(2)
            except Exception as error:
                # A failed ping is the signal that the controller began rebooting.
                if not reboot_started:
                    self.module.log("Controller firmware: Reboot started. Array Id [%s]." % self.ssid)
                    reboot_started = True
                continue
+
    def firmware_event_logger(self):
        """Determine if firmware activation has started.

        First records the newest existing MEL event number, then long-polls the
        event log, logging firmwareDownloadEvent entries until an
        "activate_success" status is seen. Loops indefinitely until then;
        intended to run on a background thread.
        """
        # Determine the last known event
        try:
            rc, events = self.request("storage-systems/%s/events" % self.ssid)
            for event in events:
                if int(event["eventNumber"]) > int(self.last_known_event):
                    self.last_known_event = event["eventNumber"]
        except Exception as error:
            self.module.fail_json(msg="Failed to determine last known event. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))

        while True:
            try:
                # Long-poll (wait=1) for events newer than the last one seen.
                rc, events = self.request("storage-systems/%s/events?lastKnown=%s&wait=1" % (self.ssid, self.last_known_event), log_request=False)
                for event in events:
                    if int(event["eventNumber"]) > int(self.last_known_event):
                        self.last_known_event = event["eventNumber"]

                    # Log firmware events
                    if event["eventType"] == "firmwareDownloadEvent":
                        self.module.log("%s" % event["status"])
                        if event["status"] == "informational" and event["statusMessage"]:
                            self.module.log("Controller firmware: %s Array Id [%s]." % (event["statusMessage"], self.ssid))

                    # When activation is successful, finish thread
                    if event["status"] == "activate_success":
                        self.module.log("Controller firmware activated. Array Id [%s]." % self.ssid)
                        return
            except Exception as error:
                # Polling errors are expected while the controller reboots; retry.
                pass
+
+    def wait_for_web_services(self):
+        """Wait for web services to report firmware and nvsram upgrade."""
+        # Poll until the reported firmware/NVSRAM versions match the uploaded files,
+        # giving up after REBOOT_TIMEOUT_SEC (for/else: else fires only without break).
+        for count in range(int(self.REBOOT_TIMEOUT_SEC / 5)):
+            try:
+                if self.is_firmware_bundled():
+                    firmware_rc, firmware_version = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/"
+                                                                 "codeVersions[codeModule='bundleDisplay']" % self.ssid, log_request=False)
+                    current_firmware_version = six.b(firmware_version[0]["versionString"])
+                else:
+                    firmware_rc, firmware_version = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion"
+                                                                 % self.ssid, log_request=False)
+                    current_firmware_version = six.b(firmware_version[0])
+
+                nvsram_rc, nvsram_version = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid, log_request=False)
+                current_nvsram_version = six.b(nvsram_version[0])
+
+                if current_firmware_version == self.firmware_version() and (not self.nvsram or current_nvsram_version == self.nvsram_version()):
+                    break
+            except Exception as error:
+                # Web services may be unavailable while the controller reboots; keep polling.
+                pass
+            sleep(5)
+        else:
+            self.module.fail_json(msg="Timeout waiting for Santricity Web Services. Array [%s]" % self.ssid)
+
+        # Wait for system to be optimal
+        for count in range(int(self.REBOOT_TIMEOUT_SEC / 5)):
+            try:
+                rc, response = self.request("storage-systems/%s" % self.ssid, log_request=False)
+
+                if response["status"] == "optimal":
+                    self.upgrade_in_progress = False
+                    break
+            except Exception as error:
+                pass
+            sleep(5)
+        else:
+            self.module.fail_json(msg="Timeout waiting for storage system to return to optimal status. Array [%s]" % self.ssid)
+
+    def embedded_upgrade(self):
+        """Upload and activate both firmware and NVSRAM.
+
+        The download/activation and the event-log follower run on separate threads;
+        both are joined before returning.
+        """
+        download_thread = threading.Thread(target=self.embedded_firmware_download)
+        event_thread = threading.Thread(target=self.firmware_event_logger)
+        download_thread.start()
+        event_thread.start()
+        download_thread.join()
+        event_thread.join()
+
+ def proxy_check_nvsram_compatibility(self, retries=10):
+ """Verify nvsram is compatible with E-Series storage system."""
+ self.module.log("Checking nvsram compatibility...")
+ data = {"storageDeviceIds": [self.ssid]}
+ try:
+ rc, check = self.request("firmware/compatibility-check", method="POST", data=data)
+ except Exception as error:
+ if retries:
+ sleep(1)
+ self.proxy_check_nvsram_compatibility(retries - 1)
+ else:
+ self.module.fail_json(msg="Failed to receive NVSRAM compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ for count in range(int(self.COMPATIBILITY_CHECK_TIMEOUT_SEC / 5)):
+ try:
+ rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"])
+ except Exception as error:
+ continue
+
+ if not response["checkRunning"]:
+ for result in response["results"][0]["nvsramFiles"]:
+ if result["filename"] == self.nvsram_name:
+ return
+ self.module.fail_json(msg="NVSRAM is not compatible. NVSRAM [%s]. Array [%s]." % (self.nvsram_name, self.ssid))
+ sleep(5)
+
+ self.module.fail_json(msg="Failed to retrieve NVSRAM status update from proxy. Array [%s]." % self.ssid)
+
+ def proxy_check_firmware_compatibility(self, retries=10):
+ """Verify firmware is compatible with E-Series storage system."""
+ check = {}
+ try:
+ rc, check = self.request("firmware/compatibility-check", method="POST", data={"storageDeviceIds": [self.ssid]})
+ except Exception as error:
+ if retries:
+ sleep(1)
+ self.proxy_check_firmware_compatibility(retries - 1)
+ else:
+ self.module.fail_json(msg="Failed to receive firmware compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ for count in range(int(self.COMPATIBILITY_CHECK_TIMEOUT_SEC / 5)):
+ try:
+ rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"])
+ except Exception as error:
+ continue
+
+ if not response["checkRunning"]:
+ for result in response["results"][0]["cfwFiles"]:
+ if result["filename"] == self.firmware_name:
+ return
+ self.module.fail_json(msg="Firmware bundle is not compatible. firmware [%s]. Array [%s]." % (self.firmware_name, self.ssid))
+ sleep(5)
+
+ self.module.fail_json(msg="Failed to retrieve firmware status update from proxy. Array [%s]." % self.ssid)
+
+    def proxy_upload_and_check_compatibility(self):
+        """Ensure firmware is uploaded and verify compatibility.
+
+        Files already present on the proxy (matched by filename) are not re-uploaded;
+        compatibility is only re-checked after a fresh upload.
+        """
+        cfw_files = []
+        try:
+            rc, cfw_files = self.request("firmware/cfw-files")
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve existing firmware files. Error [%s]" % to_native(error))
+
+        if self.firmware:
+            # for/else: the else branch uploads only when no existing file matched.
+            for cfw_file in cfw_files:
+                if cfw_file["filename"] == self.firmware_name:
+                    break
+            else:
+                fields = [("validate", "true")]
+                files = [("firmwareFile", self.firmware_name, self.firmware)]
+                headers, data = create_multipart_formdata(files=files, fields=fields)
+                try:
+                    rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers)
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to upload firmware bundle file. File [%s]. Array [%s]. Error [%s]."
+                                              % (self.firmware_name, self.ssid, to_native(error)))
+                self.proxy_check_firmware_compatibility()
+
+        if self.nvsram:
+            # Same upload-if-missing pattern for the NVSRAM file.
+            for cfw_file in cfw_files:
+                if cfw_file["filename"] == self.nvsram_name:
+                    break
+            else:
+                fields = [("validate", "true")]
+                files = [("firmwareFile", self.nvsram_name, self.nvsram)]
+                headers, data = create_multipart_formdata(files=files, fields=fields)
+                try:
+                    rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers)
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to upload NVSRAM file. File [%s]. Array [%s]. Error [%s]."
+                                              % (self.nvsram_name, self.ssid, to_native(error)))
+                self.proxy_check_nvsram_compatibility()
+
+    def proxy_check_upgrade_required(self):
+        """Staging is required to collect firmware information from the web services proxy.
+
+        Sets self.upgrade_required when the installed firmware or NVSRAM version differs
+        from the supplied files; fails the module on an attempted firmware downgrade.
+        """
+        # Verify controller consistency and get firmware versions
+        if self.firmware:
+            current_firmware_version = b""
+            try:
+                # Retrieve current bundle version
+                if self.is_firmware_bundled():
+                    rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/codeVersions[codeModule='bundleDisplay']" % self.ssid)
+                    current_firmware_version = six.b(response[0]["versionString"])
+                else:
+                    rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion" % self.ssid)
+                    current_firmware_version = six.b(response[0])
+            except Exception as error:
+                self.module.fail_json(msg="Failed to retrieve controller firmware information. Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
+
+            # Determine whether upgrade is required
+            if current_firmware_version != self.firmware_version():
+
+                # NOTE(review): major/minor components are compared as byte strings, which
+                # orders lexicographically (e.g. b"9" > b"10"); confirm components never
+                # reach two digits, or convert to int before comparing.
+                current = current_firmware_version.split(b".")[:2]
+                upgrade = self.firmware_version().split(b".")[:2]
+                if current[0] < upgrade[0] or (current[0] == upgrade[0] and current[1] <= upgrade[1]):
+                    self.upgrade_required = True
+                else:
+                    self.module.fail_json(msg="Downgrades are not permitted. Firmware [%s]. Array [%s]." % (self.firmware, self.ssid))
+
+        # Determine current NVSRAM version and whether change is required
+        if self.nvsram:
+            try:
+                rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid)
+
+                if six.b(response[0]) != self.nvsram_version():
+                    self.upgrade_required = True
+            except Exception as error:
+                self.module.fail_json(msg="Failed to retrieve storage system's NVSRAM version. Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
+
+    def proxy_wait_for_upgrade(self):
+        """Wait for SANtricity Web Services Proxy to report upgrade complete.
+
+        Polls the cfw-upgrade status every 5 seconds. If the proxy reports an error,
+        falls back to verifying versions directly via wait_for_web_services().
+        """
+        self.module.log("(Proxy) Waiting for upgrade to complete...")
+
+        status = {}
+        while True:
+            try:
+                rc, status = self.request("storage-systems/%s/cfw-upgrade" % self.ssid, log_request=False, ignore_errors=True)
+            except Exception as error:
+                self.module.fail_json(msg="Failed to retrieve firmware upgrade status! Array [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+            if "errorMessage" in status:
+                # The proxy can report transient errors even when the upgrade succeeded;
+                # confirm by checking the reported versions directly.
+                self.module.warn("Proxy reported an error. Checking whether upgrade completed. Array [%s]. Error [%s]." % (self.ssid, status["errorMessage"]))
+                self.wait_for_web_services()
+                break
+
+            if not status["running"]:
+                # Not running: either activation finished (success) or it never completed.
+                if status["activationCompletionTime"]:
+                    self.upgrade_in_progress = False
+                    break
+                else:
+                    self.module.fail_json(msg="Failed to complete upgrade. Array [%s]." % self.ssid)
+            sleep(5)
+
+    def delete_mel_events(self):
+        """Clear all mel-events (major event log) and reset the MEL cache."""
+        try:
+            rc, response = self.request("storage-systems/%s/mel-events?clearCache=true&resetMel=true" % self.ssid, method="DELETE")
+        except Exception as error:
+            self.module.fail_json(msg="Failed to clear mel-events. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+    def proxy_upgrade(self):
+        """Activate previously uploaded firmware related files.
+
+        Initiates the cfw-upgrade on the proxy (unstaged), including the NVSRAM file when
+        supplied, and optionally blocks until the proxy reports completion.
+        """
+        self.module.log("(Proxy) Firmware upgrade commencing...")
+        body = {"stageFirmware": False, "skipMelCheck": self.clear_mel_events, "cfwFile": self.firmware_name}
+        if self.nvsram:
+            body.update({"nvsramFile": self.nvsram_name})
+
+        try:
+            rc, response = self.request("storage-systems/%s/cfw-upgrade" % self.ssid, method="POST", data=body)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to initiate firmware upgrade. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+        # Upgrade continues asynchronously on the proxy.
+        self.upgrade_in_progress = True
+        if self.wait_for_completion:
+            self.proxy_wait_for_upgrade()
+
+ def apply(self):
+ """Upgrade controller firmware."""
+ if self.is_upgrade_in_progress():
+ self.module.fail_json(msg="Upgrade is already is progress. Array [%s]." % self.ssid)
+
+ if self.is_embedded():
+ self.embedded_check_compatibility()
+ else:
+ if not self.is_web_services_version_met(self.MINIMUM_PROXY_VERSION):
+ self.module.fail_json(msg="Minimum proxy version %s required!")
+ self.proxy_check_upgrade_required()
+
+ # This will upload the firmware files to the web services proxy but not to the controller
+ if self.upgrade_required:
+ self.proxy_upload_and_check_compatibility()
+
+ # Perform upgrade
+ if self.upgrade_required and not self.module.check_mode:
+
+ if self.clear_mel_events:
+ self.delete_mel_events()
+
+ if self.is_embedded():
+ self.embedded_upgrade()
+ else:
+ self.proxy_upgrade()
+
+ self.module.exit_json(changed=self.upgrade_required, upgrade_in_process=self.upgrade_in_progress)
+
+
+def main():
+    """Module entry point: instantiate the firmware module and apply the requested state."""
+    firmware = NetAppESeriesFirmware()
+    firmware.apply()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_global.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_global.py
new file mode 100644
index 00000000..a02f2474
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_global.py
@@ -0,0 +1,367 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_global
+short_description: NetApp E-Series manage global settings configuration
+description:
+ - Allow the user to configure several of the global settings associated with an E-Series storage-system
+author:
+ - Michael Price (@lmprice)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ name:
+ description:
+ - Set the name of the E-Series storage-system
+ - This label/name doesn't have to be unique.
+ - May be up to 30 characters in length.
+ type: str
+ aliases:
+ - label
+ cache_block_size:
+ description:
+ - Size of the cache's block size.
+ - All volumes on the storage system share the same cache space; therefore, the volumes can have only one cache block size.
+ - See M(netapp_eseries.santricity.na_santricity_facts) for available sizes.
+ type: int
+ required: False
+ cache_flush_threshold:
+ description:
+ - This is the percentage threshold of the amount of unwritten data that is allowed to remain on the storage array's cache before flushing.
+ type: int
+ required: False
+ default_host_type:
+ description:
+ - Default host type for the storage system.
+ - Either one of the following names can be specified, Linux DM-MP, VMWare, Windows, Windows Clustered, or a
+ host type index which can be found in M(netapp_eseries.santricity.na_santricity_facts)
+ type: str
+ required: False
+ automatic_load_balancing:
+ description:
+ - Enable automatic load balancing to allow incoming traffic from the hosts to be dynamically managed and balanced across both controllers.
+ - Automatic load balancing requires host connectivity reporting to be enabled.
+ type: str
+ choices:
+ - enabled
+ - disabled
+ required: False
+ host_connectivity_reporting:
+ description:
+ - Enable host connectivity reporting to allow host connections to be monitored for connection and multipath driver problems.
+      - When I(automatic_load_balancing==enabled) then I(host_connectivity_reporting) must be enabled.
+ type: str
+ choices:
+ - enabled
+ - disabled
+ required: False
+notes:
+ - Check mode is supported.
+ - This module requires Web Services API v1.3 or newer.
+"""
+
+EXAMPLES = """
+ - name: Set the storage-system name
+ na_santricity_global:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ name: myArrayName
+ cache_block_size: 32768
+ cache_flush_threshold: 80
+ automatic_load_balancing: enabled
+ default_host_type: Linux DM-MP
+ - name: Set the storage-system name
+ na_santricity_global:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ name: myOtherArrayName
+ cache_block_size: 8192
+ cache_flush_threshold: 60
+ automatic_load_balancing: disabled
+ default_host_type: 28
+"""
+
+RETURN = """
+changed:
+ description: Whether global settings were changed
+ returned: on success
+ type: bool
+ sample: true
+array_name:
+ description: Current storage array's name
+ returned: on success
+ type: str
+ sample: arrayName
+automatic_load_balancing:
+ description: Whether automatic load balancing feature has been enabled
+ returned: on success
+ type: str
+ sample: enabled
+host_connectivity_reporting:
+ description: Whether host connectivity reporting feature has been enabled
+ returned: on success
+ type: str
+ sample: enabled
+cache_settings:
+ description: Current cache block size and flushing threshold values
+ returned: on success
+ type: dict
+ sample: {"cache_block_size": 32768, "cache_flush_threshold": 80}
+default_host_type_index:
+ description: Current default host type index
+ returned: on success
+ type: int
+ sample: 28
+"""
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesGlobalSettings(NetAppESeriesModule):
+ def __init__(self):
+ version = "02.00.0000.0000"
+ ansible_options = dict(cache_block_size=dict(type="int", require=False),
+ cache_flush_threshold=dict(type="int", required=False),
+ default_host_type=dict(type="str", require=False),
+ automatic_load_balancing=dict(type="str", choices=["enabled", "disabled"], required=False),
+ host_connectivity_reporting=dict(type="str", choices=["enabled", "disabled"], required=False),
+ name=dict(type='str', required=False, aliases=['label']))
+
+ super(NetAppESeriesGlobalSettings, self).__init__(ansible_options=ansible_options,
+ web_services_version=version,
+ supports_check_mode=True)
+ args = self.module.params
+ self.name = args["name"]
+ self.cache_block_size = args["cache_block_size"]
+ self.cache_flush_threshold = args["cache_flush_threshold"]
+ self.host_type_index = args["default_host_type"]
+
+ self.autoload_enabled = None
+ if args["automatic_load_balancing"]:
+ self.autoload_enabled = args["automatic_load_balancing"] == "enabled"
+
+ self.host_connectivity_reporting_enabled = None
+ if args["host_connectivity_reporting"]:
+ self.host_connectivity_reporting_enabled = args["host_connectivity_reporting"] == "enabled"
+ elif self.autoload_enabled:
+ self.host_connectivity_reporting_enabled = True
+
+ if self.autoload_enabled and not self.host_connectivity_reporting_enabled:
+ self.module.fail_json(msg="Option automatic_load_balancing requires host_connectivity_reporting to be enabled. Array [%s]." % self.ssid)
+
+ self.current_configuration_cache = None
+
+ def get_current_configuration(self, update=False):
+ """Retrieve the current storage array's global configuration."""
+ if self.current_configuration_cache is None or update:
+ self.current_configuration_cache = dict()
+
+ # Get the storage array's capabilities and available options
+ try:
+ rc, capabilities = self.request("storage-systems/%s/capabilities" % self.ssid)
+ self.current_configuration_cache["autoload_capable"] = "capabilityAutoLoadBalancing" in capabilities["productCapabilities"]
+ self.current_configuration_cache["cache_block_size_options"] = capabilities["featureParameters"]["cacheBlockSizes"]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve storage array capabilities. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ try:
+ rc, host_types = self.request("storage-systems/%s/host-types" % self.ssid)
+ self.current_configuration_cache["host_type_options"] = dict()
+ for host_type in host_types:
+ self.current_configuration_cache["host_type_options"].update({host_type["code"].lower(): host_type["index"]})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve storage array host options. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ # Get the current cache settings
+ try:
+ rc, settings = self.request("storage-systems/%s/graph/xpath-filter?query=/sa" % self.ssid)
+ self.current_configuration_cache["cache_settings"] = {"cache_block_size": settings[0]["cache"]["cacheBlkSize"],
+ "cache_flush_threshold": settings[0]["cache"]["demandFlushThreshold"]}
+ self.current_configuration_cache["default_host_type_index"] = settings[0]["defaultHostTypeIndex"]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve cache settings. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ try:
+ rc, array_info = self.request("storage-systems/%s" % self.ssid)
+ self.current_configuration_cache["autoload_enabled"] = array_info["autoLoadBalancingEnabled"]
+ self.current_configuration_cache["host_connectivity_reporting_enabled"] = array_info["hostConnectivityReportingEnabled"]
+ self.current_configuration_cache["name"] = array_info['name']
+ except Exception as error:
+ self.module.fail_json(msg="Failed to determine current configuration. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ return self.current_configuration_cache
+
+ def change_cache_block_size_required(self):
+ """Determine whether cache block size change is required."""
+ if self.cache_block_size is None:
+ return False
+
+ current_available_block_sizes = self.get_current_configuration()["cache_block_size_options"]
+ if self.cache_block_size not in current_available_block_sizes:
+ self.module.fail_json(msg="Invalid cache block size. Array [%s]. Available cache block sizes [%s]." % (self.ssid, current_available_block_sizes))
+
+ return self.cache_block_size != self.get_current_configuration()["cache_settings"]["cache_block_size"]
+
+ def change_cache_flush_threshold_required(self):
+ """Determine whether cache flush percentage change is required."""
+ if self.cache_flush_threshold is None:
+ return False
+
+ if self.cache_flush_threshold <= 0 or self.cache_flush_threshold >= 100:
+ self.module.fail_json(msg="Invalid cache flushing threshold, it must be equal to or between 0 and 100. Array [%s]" % self.ssid)
+
+ return self.cache_flush_threshold != self.get_current_configuration()["cache_settings"]["cache_flush_threshold"]
+
+ def change_host_type_required(self):
+ """Determine whether default host type change is required."""
+ if self.host_type_index is None:
+ return False
+
+ current_available_host_types = self.get_current_configuration()["host_type_options"]
+ if isinstance(self.host_type_index, str):
+ self.host_type_index = self.host_type_index.lower()
+
+ if self.host_type_index in self.HOST_TYPE_INDEXES.keys():
+ self.host_type_index = self.HOST_TYPE_INDEXES[self.host_type_index]
+ elif self.host_type_index in current_available_host_types.keys():
+ self.host_type_index = current_available_host_types[self.host_type_index]
+
+ if self.host_type_index not in current_available_host_types.values():
+ self.module.fail_json(msg="Invalid host type index! Array [%s]. Available host options [%s]." % (self.ssid, current_available_host_types))
+
+ return int(self.host_type_index) != self.get_current_configuration()["default_host_type_index"]
+
+ def change_autoload_enabled_required(self):
+ """Determine whether automatic load balancing state change is required."""
+ if self.autoload_enabled is None:
+ return False
+
+ change_required = False
+ if self.autoload_enabled and not self.get_current_configuration()["autoload_capable"]:
+ self.module.fail_json(msg="Automatic load balancing is not available. Array [%s]." % self.ssid)
+
+ if self.autoload_enabled:
+ if not self.get_current_configuration()["autoload_enabled"] or not self.get_current_configuration()["host_connectivity_reporting_enabled"]:
+ change_required = True
+ elif self.get_current_configuration()["autoload_enabled"]:
+ change_required = True
+
+ return change_required
+
+ def change_host_connectivity_reporting_enabled_required(self):
+ """Determine whether host connectivity reporting state change is required."""
+ if self.host_connectivity_reporting_enabled is None:
+ return False
+
+ return self.host_connectivity_reporting_enabled != self.get_current_configuration()["host_connectivity_reporting_enabled"]
+
+ def change_name_required(self):
+ """Determine whether storage array name change is required."""
+ if self.name is None:
+ return False
+
+ if self.name and len(self.name) > 30:
+ self.module.fail_json(msg="The provided name is invalid, it must be less than or equal to 30 characters in length. Array [%s]" % self.ssid)
+
+ return self.name != self.get_current_configuration()["name"]
+
+ def update_cache_settings(self):
+ """Update cache block size and/or flushing threshold."""
+ block_size = self.cache_block_size if self.cache_block_size else self.get_current_configuration()["cache_settings"]["cache_block_size"]
+ threshold = self.cache_flush_threshold if self.cache_flush_threshold else self.get_current_configuration()["cache_settings"]["cache_flush_threshold"]
+ try:
+ rc, cache_settings = self.request("storage-systems/%s/symbol/setSACacheParams?verboseErrorResponse=true" % self.ssid, method="POST",
+ data={"cacheBlkSize": block_size, "demandFlushAmount": threshold, "demandFlushThreshold": threshold})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set cache settings. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def update_host_type(self):
+ """Update default host type."""
+ try:
+ rc, default_host_type = self.request("storage-systems/%s/symbol/setStorageArrayProperties?verboseErrorResponse=true" % self.ssid, method="POST",
+ data={"settings": {"defaultHostTypeIndex": self.host_type_index}})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set default host type. Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
+
+ def update_autoload(self):
+ """Update automatic load balancing state."""
+ if self.autoload_enabled and not self.get_current_configuration()["host_connectivity_reporting_enabled"]:
+ try:
+ rc, host_connectivity_reporting = self.request("storage-systems/%s/symbol/setHostConnectivityReporting?verboseErrorResponse=true" % self.ssid,
+ method="POST", data={"enableHostConnectivityReporting": self.autoload_enabled})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to enable host connectivity reporting which is needed for automatic load balancing state."
+ " Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ try:
+ rc, autoload = self.request("storage-systems/%s/symbol/setAutoLoadBalancing?verboseErrorResponse=true" % self.ssid,
+ method="POST", data={"enableAutoLoadBalancing": self.autoload_enabled})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set automatic load balancing state. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def update_host_connectivity_reporting_enabled(self):
+ """Update automatic load balancing state."""
+ try:
+ rc, host_connectivity_reporting = self.request("storage-systems/%s/symbol/setHostConnectivityReporting?verboseErrorResponse=true" % self.ssid,
+ method="POST", data={"enableHostConnectivityReporting": self.host_connectivity_reporting_enabled})
+ except Exception as error:
+ self.module.fail_json(msg="Failed to enable host connectivity reporting. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def update_name(self):
+ """Update storage array's name."""
+ try:
+ rc, result = self.request("storage-systems/%s/configuration" % self.ssid, method="POST", data={"name": self.name})
+ except Exception as err:
+ self.module.fail_json(msg="Failed to set the storage array name! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ def update(self):
+ """Ensure the storage array's global setting are correctly set."""
+ change_required = False
+ self.get_current_configuration()
+
+ if (self.change_autoload_enabled_required() or self.change_cache_block_size_required() or self.change_cache_flush_threshold_required() or
+ self.change_host_type_required() or self.change_name_required() or self.change_host_connectivity_reporting_enabled_required()):
+ change_required = True
+
+ if change_required and not self.module.check_mode:
+ if self.change_autoload_enabled_required():
+ self.update_autoload()
+ if self.change_host_connectivity_reporting_enabled_required():
+ self.update_host_connectivity_reporting_enabled()
+ if self.change_cache_block_size_required() or self.change_cache_flush_threshold_required():
+ self.update_cache_settings()
+ if self.change_host_type_required():
+ self.update_host_type()
+ if self.change_name_required():
+ self.update_name()
+
+ self.get_current_configuration(update=True)
+ self.module.exit_json(changed=change_required,
+ cache_settings=self.get_current_configuration()["cache_settings"],
+ default_host_type_index=self.get_current_configuration()["default_host_type_index"],
+ automatic_load_balancing="enabled" if self.get_current_configuration()["autoload_enabled"] else "disabled",
+ host_connectivity_reporting="enabled" if self.get_current_configuration()["host_connectivity_reporting_enabled"] else "disabled",
+ array_name=self.get_current_configuration()["name"])
+
+
+def main():
+    """Module entry point: apply the requested global settings."""
+    global_settings = NetAppESeriesGlobalSettings()
+    global_settings.update()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_host.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_host.py
new file mode 100644
index 00000000..c57e13f5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_host.py
@@ -0,0 +1,512 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_host
+short_description: NetApp E-Series manage eseries hosts
+description: Create, update, remove hosts on NetApp E-series storage arrays
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ name:
+ description:
+ - If the host doesn't yet exist, the label/name to assign at creation time.
+      - If the host already exists, this will be used to uniquely identify the host to make any required changes
+ type: str
+ required: True
+ aliases:
+ - label
+ state:
+ description:
+ - Set to absent to remove an existing host
+ - Set to present to modify or create a new host definition
+ type: str
+ choices:
+ - absent
+ - present
+ default: present
+ host_type:
+ description:
+ - Host type includes operating system and multipath considerations.
+ - If not specified, the default host type will be utilized. Default host type can be set using M(netapp_eseries.santricity.na_santricity_global).
+ - For storage array specific options see M(netapp_eseries.santricity.na_santricity_facts).
+ - All values are case-insensitive.
+ - AIX MPIO - The Advanced Interactive Executive (AIX) OS and the native MPIO driver
+ - AVT 4M - Silicon Graphics, Inc. (SGI) proprietary multipath driver
+ - HP-UX - The HP-UX OS with native multipath driver
+ - Linux ATTO - The Linux OS and the ATTO Technology, Inc. driver (must use ATTO FC HBAs)
+ - Linux DM-MP - The Linux OS and the native DM-MP driver
+ - Linux Pathmanager - The Linux OS and the SGI proprietary multipath driver
+ - Mac - The Mac OS and the ATTO Technology, Inc. driver
+ - ONTAP - FlexArray
+ - Solaris 11 or later - The Solaris 11 or later OS and the native MPxIO driver
+ - Solaris 10 or earlier - The Solaris 10 or earlier OS and the native MPxIO driver
+ - SVC - IBM SAN Volume Controller
+ - VMware - ESXi OS
+ - Windows - Windows Server OS and Windows MPIO with a DSM driver
+ - Windows Clustered - Clustered Windows Server OS and Windows MPIO with a DSM driver
+ - Windows ATTO - Windows OS and the ATTO Technology, Inc. driver
+ type: str
+ required: False
+ aliases:
+ - host_type_index
+ ports:
+ description:
+ - A list of host ports you wish to associate with the host.
+ - Host ports are uniquely identified by their WWN or IQN. Their assignments to a particular host are
+ uniquely identified by a label and these must be unique.
+ type: list
+ required: False
+ suboptions:
+ type:
+ description:
+ - The interface type of the port to define.
+ - Acceptable choices depend on the capabilities of the target hardware/software platform.
+ required: true
+ choices:
+ - iscsi
+ - sas
+ - fc
+ - ib
+ - nvmeof
+ label:
+ description:
+ - A unique label to assign to this port assignment.
+ required: true
+ port:
+ description:
+ - The WWN or IQN of the hostPort to assign to this port definition.
+ required: true
+ force_port:
+ description:
+ - Allow ports that are already assigned to be re-assigned to your current host
+ required: false
+ type: bool
+"""
+
+EXAMPLES = """
+ - name: Define or update an existing host named "Host1"
+ na_santricity_host:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ name: "Host1"
+ state: present
+ host_type_index: Linux DM-MP
+ ports:
+ - type: "iscsi"
+ label: "PORT_1"
+ port: "iqn.1996-04.de.suse:01:56f86f9bd1fe"
+ - type: "fc"
+ label: "FC_1"
+ port: "10:00:FF:7C:FF:FF:FF:01"
+ - type: "fc"
+ label: "FC_2"
+ port: "10:00:FF:7C:FF:FF:FF:00"
+
+ - name: Ensure a host named "Host2" doesn"t exist
+ na_santricity_host:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ name: "Host2"
+ state: absent
+"""
+
+RETURN = """
+msg:
+ description:
+ - A user-readable description of the actions performed.
+ returned: on success
+ type: str
+ sample: The host has been created.
+id:
+ description:
+ - the unique identifier of the host on the E-Series storage-system
+ returned: on success when state=present
+ type: str
+ sample: 00000000600A098000AAC0C3003004700AD86A52
+ssid:
+ description:
+      - the unique identifier of the E-Series storage-system with the current API
+ returned: on success
+ type: str
+ sample: 1
+api_url:
+ description:
+      - the URL of the API that this request was processed by
+ returned: on success
+ type: str
+ sample: https://webservices.example.com:8443
+"""
+import re
+
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+
+
class NetAppESeriesHost(NetAppESeriesModule):
    """Create, update, or remove a host definition on a NetApp E-Series storage array."""

    # Host-port interface types accepted in the ``ports`` option.
    PORT_TYPES = ["iscsi", "sas", "fc", "ib", "nvmeof"]

    def __init__(self):
        ansible_options = dict(state=dict(type="str", default="present", choices=["absent", "present"]),
                               ports=dict(type="list", required=False),
                               force_port=dict(type="bool", default=False),
                               name=dict(type="str", required=True, aliases=["label"]),
                               host_type=dict(type="str", required=False, aliases=["host_type_index"]))

        super(NetAppESeriesHost, self).__init__(ansible_options=ansible_options,
                                                web_services_version="02.00.0000.0000",
                                                supports_check_mode=True)

        self.check_mode = self.module.check_mode
        args = self.module.params
        self.ports = args["ports"]
        self.force_port = args["force_port"]
        self.name = args["name"]
        self.state = args["state"]

        # Working state populated by host_exists/needs_update and consumed by update_host.
        self.post_body = dict()
        self.all_hosts = list()
        self.host_obj = dict()
        self.new_ports = list()
        self.ports_for_update = list()
        self.ports_for_removal = list()
        # Host owning a conflicting port, set by port_on_diff_host.
        self.other_host = None

        # Resolve the host type option (either a symbolic name or a numeric index) to an index.
        host_type = args["host_type"]
        if host_type:
            host_type = host_type.lower()
            if host_type in [key.lower() for key in list(self.HOST_TYPE_INDEXES.keys())]:
                self.host_type_index = self.HOST_TYPE_INDEXES[host_type]
            elif host_type.isdigit():
                self.host_type_index = int(args["host_type"])
            else:
                self.module.fail_json(msg="host_type must be either a host type name or a host type index found in the documentation.")
        else:
            self.host_type_index = None

        if not self.url.endswith("/"):
            self.url += "/"

        # Normalize supplied ports: lower-case labels/types/addresses and canonicalize WWPN-style
        # addresses (strip colons and any 0x prefix) so they compare equal to API-reported values.
        if self.ports is not None:
            for port in self.ports:
                port["label"] = port["label"].lower()
                port["type"] = port["type"].lower()
                port["port"] = port["port"].lower()

                if port["type"] not in self.PORT_TYPES:
                    self.module.fail_json(msg="Invalid port type! Port interface type must be one of [%s]." % ", ".join(self.PORT_TYPES))

                # Determine whether address is a 16-byte WWPN and, if so, remove separators/prefix.
                if re.match(r"^(0x)?[0-9a-f]{16}$", port["port"].replace(":", "")):
                    port["port"] = port["port"].replace(":", '').replace("0x", "")

                    if port["type"] == "ib":
                        # InfiniBand GUIDs are zero-padded to 32 hex characters.
                        port["port"] = "0" * (32 - len(port["port"])) + port["port"]

    @property
    def default_host_type(self):
        """Return the storage array's default host type index."""
        try:
            rc, default_index = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/defaultHostTypeIndex" % self.ssid)
            return default_index[0]
        except Exception as error:
            # Include the underlying error so the failure is diagnosable (was previously dropped).
            self.module.fail_json(msg="Failed to retrieve default host type index. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))

    @property
    def valid_host_type(self):
        """Return True when the requested host type index exists on the storage array; fail otherwise."""
        host_types = None
        try:
            rc, host_types = self.request("storage-systems/%s/host-types" % self.ssid)
        except Exception as err:
            self.module.fail_json(msg="Failed to get host types. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

        if any(host_type["index"] == self.host_type_index for host_type in host_types):
            return True
        self.module.fail_json(msg="There is no host type with index %s" % self.host_type_index)

    def check_port_types(self):
        """Check whether the requested port interface types are available on the storage system.

        Also rewrites ``ib`` ports carrying an IQN to ``iscsi`` when the hardware reports
        iSER support (the API models iSER ports as iSCSI).
        """
        try:
            rc, interfaces = self.request("storage-systems/%s/interfaces?channelType=hostside" % self.ssid)

            for port in self.ports:
                for interface in interfaces:

                    # Check for IB iSER
                    if port["type"] == "ib" and "iqn" in port["port"]:
                        if ((interface["ioInterfaceTypeData"]["interfaceType"] == "iscsi" and
                                interface["ioInterfaceTypeData"]["iscsi"]["interfaceData"]["type"] == "infiniband" and
                                interface["ioInterfaceTypeData"]["iscsi"]["interfaceData"]["infinibandData"]["isIser"]) or
                                (interface["ioInterfaceTypeData"]["interfaceType"] == "ib" and
                                 interface["ioInterfaceTypeData"]["ib"]["isISERSupported"])):
                            port["type"] = "iscsi"
                            break
                    # Check for NVMe
                    elif (port["type"] == "nvmeof" and "commandProtocolPropertiesList" in interface and
                          "commandProtocolProperties" in interface["commandProtocolPropertiesList"] and
                          interface["commandProtocolPropertiesList"]["commandProtocolProperties"]):
                        if interface["commandProtocolPropertiesList"]["commandProtocolProperties"][0]["commandProtocol"] == "nvme":
                            break
                    # Check SAS, FC, iSCSI
                    elif ((port["type"] == "fc" and interface["ioInterfaceTypeData"]["interfaceType"] == "fibre") or
                          (port["type"] == interface["ioInterfaceTypeData"]["interfaceType"])):
                        break
                else:
                    self.module.fail_json(msg="Invalid port type! Type [%s]. Port [%s]." % (port["type"], port["label"]))
        except Exception as error:
            # Older web services versions do not expose the interfaces endpoint; fall back to the
            # IQN heuristic for IB iSER only and accept the remaining types as-is.
            for port in self.ports:
                if port["type"] == "ib" and "iqn" in port["port"]:
                    port["type"] = "iscsi"
                    break

    def assigned_host_ports(self, apply_unassigning=False):
        """Determine whether requested host ports are already assigned to other hosts.

        :param bool apply_unassigning: when True, remove the conflicting ports from their
            current hosts (only allowed when force_port is set; otherwise the module fails).
        :return: dict mapping host reference -> list of port references in use.
        """
        used_host_ports = {}
        for host in self.all_hosts:
            if host["label"] != self.name.lower():
                for host_port in host["hostSidePorts"]:
                    for port in self.ports:
                        if port["port"] == host_port["address"] or port["label"] == host_port["label"]:
                            if not self.force_port:
                                self.module.fail_json(msg="Port label or address is already used and force_port option is set to false!")
                            else:
                                # Determine port reference (FC/SAS ports vs iSCSI initiators).
                                port_ref = [port["hostPortRef"] for port in host["ports"]
                                            if port["hostPortName"] == host_port["address"]]
                                port_ref.extend([port["initiatorRef"] for port in host["initiators"]
                                                 if port["nodeName"]["iscsiNodeName"] == host_port["address"]])

                                # Create dictionary of hosts containing list of port references
                                if host["hostRef"] not in used_host_ports.keys():
                                    used_host_ports.update({host["hostRef"]: port_ref})
                                else:
                                    used_host_ports[host["hostRef"]].extend(port_ref)

        # Unassign assigned ports
        if apply_unassigning:
            for host_ref in used_host_ports.keys():
                try:
                    rc, resp = self.request("storage-systems/%s/hosts/%s" % (self.ssid, host_ref), method="POST",
                                            data={"portsToRemove": used_host_ports[host_ref]})
                except Exception as err:
                    self.module.fail_json(msg="Failed to unassign host port. Host Id [%s]. Array Id [%s]. Ports [%s]. Error [%s]."
                                              % (self.host_obj["id"], self.ssid, used_host_ports[host_ref], to_native(err)))

        return used_host_ports

    @property
    def host_exists(self):
        """Determine if the requested host exists.

        As a side effect, set the full list of defined hosts in ``all_hosts``, and the target
        host in ``host_obj``. Labels and port identifiers are lower-cased for comparison.
        """
        match = False
        all_hosts = list()

        try:
            rc, all_hosts = self.request("storage-systems/%s/hosts" % self.ssid)
        except Exception as err:
            self.module.fail_json(msg="Failed to determine host existence. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

        # Augment the host objects
        for host in all_hosts:
            host["label"] = host["label"].lower()
            for port in host["hostSidePorts"]:
                port["type"] = port["type"].lower()
                port["address"] = port["address"].lower()
                port["label"] = port["label"].lower()

            # Augment hostSidePorts with their ID (this is an omission in the API)
            ports = dict((port["label"], port["id"]) for port in host["ports"])
            ports.update((port["label"], port["id"]) for port in host["initiators"])

            for host_side_port in host["hostSidePorts"]:
                if host_side_port["label"] in ports:
                    host_side_port["id"] = ports[host_side_port["label"]]

            if host["label"] == self.name.lower():
                self.host_obj = host
                match = True

        self.all_hosts = all_hosts
        return match

    @property
    def needs_update(self):
        """Determine whether the existing host object differs from the requested state.

        As a side effect, populates the port work lists on self: ``ports_for_update``
        (re-labeled/re-typed ports), ``new_ports`` (ports to add) and ``ports_for_removal``
        (ports present on the host but not requested).
        """
        changed = False
        if self.host_obj["hostTypeIndex"] != self.host_type_index:
            changed = True

        current_host_ports = dict((port["id"], {"type": port["type"], "port": port["address"], "label": port["label"]})
                                  for port in self.host_obj["hostSidePorts"])

        if self.ports:
            for port in self.ports:
                for current_host_port_id in current_host_ports.keys():
                    if port == current_host_ports[current_host_port_id]:
                        current_host_ports.pop(current_host_port_id)
                        break

                    elif port["port"] == current_host_ports[current_host_port_id]["port"]:
                        if self.port_on_diff_host(port) and not self.force_port:
                            self.module.fail_json(msg="The port you specified [%s] is associated with a different host."
                                                      " Specify force_port as True or try a different port spec" % port)

                        if (port["label"] != current_host_ports[current_host_port_id]["label"] or
                                port["type"] != current_host_ports[current_host_port_id]["type"]):
                            current_host_ports.pop(current_host_port_id)
                            self.ports_for_update.append({"portRef": current_host_port_id, "port": port["port"],
                                                          "label": port["label"], "hostRef": self.host_obj["hostRef"]})
                            break
                else:
                    self.new_ports.append(port)

            # Only unmatched ports remain; schedule them for removal. Computing this solely when
            # ports were supplied keeps the module idempotent when the option is omitted (the
            # original computed removals unconditionally, reporting changed=True on every run
            # for hosts with ports even though update_host never sent the removals).
            self.ports_for_removal = list(current_host_ports.keys())

        changed = any([self.new_ports, self.ports_for_update, self.ports_for_removal, changed])
        return changed

    def port_on_diff_host(self, arg_port):
        """Return True when the passed-in port is present on a different host.

        Side effect: records the conflicting host in ``self.other_host``.
        """
        for host in self.all_hosts:
            # Only check "other" hosts. NOTE(review): host["name"] keeps its original case while
            # self.name is user-supplied; comparison is case-sensitive here — confirm intended.
            if host["name"] != self.name:
                for port in host["hostSidePorts"]:
                    # Check if the port label or address is found on this host.
                    if arg_port["label"] == port["label"] or arg_port["port"] == port["address"]:
                        self.other_host = host
                        return True
        return False

    def update_host(self):
        """Apply the accumulated port and host-type changes to the existing host and exit."""
        if self.ports:
            # Remove ports that need reassigning from their current host.
            self.assigned_host_ports(apply_unassigning=True)

            self.post_body["portsToUpdate"] = self.ports_for_update
            self.post_body["portsToRemove"] = self.ports_for_removal
            self.post_body["ports"] = self.new_ports

        self.post_body["hostType"] = dict(index=self.host_type_index)
        if not self.check_mode:
            try:
                rc, self.host_obj = self.request("storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj["id"]),
                                                 method="POST", data=self.post_body, ignore_errors=True)
            except Exception as err:
                self.module.fail_json(msg="Failed to update host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

        # Include the documented success payload (id/ssid/api_url) in the module result.
        payload = self.build_success_payload(self.host_obj)
        self.module.exit_json(changed=True, msg="Host updated.", **payload)

    def create_host(self):
        """Create the host definition on the storage array and exit."""
        # Remove ports that need reassigning from their current host.
        self.assigned_host_ports(apply_unassigning=True)

        post_body = dict(name=self.name,
                         hostType=dict(index=self.host_type_index))

        if self.ports:
            post_body.update(ports=self.ports)

        if not self.host_exists:
            if not self.check_mode:
                try:
                    rc, self.host_obj = self.request("storage-systems/%s/hosts" % self.ssid, method="POST", data=post_body, ignore_errors=True)
                except Exception as err:
                    self.module.fail_json(msg="Failed to create host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
        else:
            payload = self.build_success_payload(self.host_obj)
            self.module.exit_json(changed=False, msg="Host already exists. Id [%s]. Host [%s]." % (self.ssid, self.name), **payload)

        # Include the documented success payload (id/ssid/api_url) in the module result; the
        # original built it and then discarded it.
        payload = self.build_success_payload(self.host_obj)
        self.module.exit_json(changed=True, msg="Host created.", **payload)

    def remove_host(self):
        """Delete the host definition from the storage array."""
        try:
            rc, resp = self.request("storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj["id"]), method="DELETE")
        except Exception as err:
            self.module.fail_json(msg="Failed to remove host. Host[%s]. Array Id [%s]. Error [%s]." % (self.host_obj["id"], self.ssid, to_native(err)))

    def build_success_payload(self, host=None):
        """Build the module result payload (host id when available, plus ssid and api_url)."""
        keys = ["id"]

        if host:
            result = dict((key, host[key]) for key in keys)
        else:
            result = dict()
        result["ssid"] = self.ssid
        result["api_url"] = self.url
        return result

    def apply(self):
        """Drive the module: ensure the host is present/absent as requested and exit."""
        if self.state == "present":
            if self.host_type_index is None:
                self.host_type_index = self.default_host_type

            self.check_port_types()
            if self.host_exists:
                if self.needs_update and self.valid_host_type:
                    self.update_host()
                else:
                    payload = self.build_success_payload(self.host_obj)
                    self.module.exit_json(changed=False, msg="Host already present; no changes required.", **payload)
            elif self.valid_host_type:
                self.create_host()
        else:
            payload = self.build_success_payload()
            if self.host_exists:
                self.remove_host()
                self.module.exit_json(changed=True, msg="Host removed.", **payload)
            else:
                self.module.exit_json(changed=False, msg="Host already absent.", **payload)
+
+
def main():
    """Module entry point: construct the host manager and apply the requested state."""
    NetAppESeriesHost().apply()


if __name__ == "__main__":
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_hostgroup.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_hostgroup.py
new file mode 100644
index 00000000..7b8a9e2a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_hostgroup.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_hostgroup
+short_description: NetApp E-Series manage array host groups
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+description: Create, update or destroy host groups on a NetApp E-Series storage array.
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - Whether the specified host group should exist or not.
+ type: str
+ choices: ["present", "absent"]
+ default: present
+ name:
+ description:
+ - Name of the host group to manage
+ type: str
+ required: false
+ hosts:
+ description:
+ - List of host names/labels to add to the group
+ type: list
+ required: false
+"""
+EXAMPLES = """
+ - name: Configure Hostgroup
+ na_santricity_hostgroup:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: present
+ name: example_hostgroup
+ hosts:
+ - host01
+ - host02
+"""
+RETURN = """
+clusterRef:
+ description: The unique identification value for this object. Other objects may use this reference value to refer to the cluster.
+ returned: always except when state is absent
+ type: str
+ sample: "3233343536373839303132333100000000000000"
+confirmLUNMappingCreation:
+ description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping
+ will alter the volume access rights of other clusters, in addition to this one.
+ returned: always
+ type: bool
+ sample: false
+hosts:
+ description: A list of the hosts that are part of the host group after all operations.
+ returned: always except when state is absent
+ type: list
+ sample: ["HostA","HostB"]
+id:
+ description: The id number of the hostgroup
+ returned: always except when state is absent
+ type: str
+ sample: "3233343536373839303132333100000000000000"
+isSAControlled:
+ description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false,
+ indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings.
+ returned: always except when state is absent
+ type: bool
+ sample: false
+label:
+ description: The user-assigned, descriptive label string for the cluster.
+ returned: always
+ type: str
+ sample: "MyHostGroup"
+name:
+ description: same as label
+ returned: always except when state is absent
+ type: str
+ sample: "MyHostGroup"
+protectionInformationCapableAccessMethod:
+ description: This field is true if the host has a PI capable access method.
+ returned: always except when state is absent
+ type: bool
+ sample: true
+"""
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata, request
+
+
class NetAppESeriesHostGroup(NetAppESeriesModule):
    """Create, update or delete a host group on a NetApp E-Series storage array."""

    EXPANSION_TIMEOUT_SEC = 10
    DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11

    def __init__(self):
        version = "02.00.0000.0000"
        ansible_options = dict(
            state=dict(choices=["present", "absent"], type="str", default="present"),
            name=dict(required=True, type="str"),
            hosts=dict(required=False, type="list"))
        super(NetAppESeriesHostGroup, self).__init__(ansible_options=ansible_options,
                                                     web_services_version=version,
                                                     supports_check_mode=True)

        args = self.module.params
        self.state = args["state"]
        self.name = args["name"]
        self.hosts_list = args["hosts"]

        # Lazily populated by apply() / the hosts property.
        self.current_host_group = None
        self.hosts_cache = None

    @property
    def hosts(self):
        """Return the sorted host reference identifiers that should belong to the host group.

        Resolves each requested host (by id or case-insensitive name) against the array's
        defined hosts; fails when a requested host does not exist. Cached after first use.
        """
        if self.hosts_cache is None:
            self.hosts_cache = []
            existing_hosts = []

            if self.hosts_list:
                try:
                    rc, existing_hosts = self.request("storage-systems/%s/hosts" % self.ssid)
                except Exception as error:
                    self.module.fail_json(msg="Failed to retrieve hosts information. Array id [%s]. Error[%s]."
                                              % (self.ssid, to_native(error)))

                for host in self.hosts_list:
                    for existing_host in existing_hosts:
                        if host in existing_host["id"] or host.lower() in existing_host["name"].lower():
                            self.hosts_cache.append(existing_host["id"])
                            break
                    else:
                        self.module.fail_json(msg="Expected host does not exist. Array id [%s]. Host [%s]." % (self.ssid, host))
            self.hosts_cache.sort()
        return self.hosts_cache

    @property
    def host_groups(self):
        """Return the existing host groups, each as {"id", "name", "hosts": [host refs]}."""
        host_groups = []
        hosts = []
        try:
            rc, host_groups = self.request("storage-systems/%s/host-groups" % self.ssid)
            rc, hosts = self.request("storage-systems/%s/hosts" % self.ssid)
        except Exception as error:
            self.module.fail_json(msg="Failed to retrieve host group information. Array id [%s]. Error[%s]."
                                      % (self.ssid, to_native(error)))

        host_groups = [{"id": group["clusterRef"], "name": group["name"]} for group in host_groups]
        for group in host_groups:
            hosts_ids = []
            for host in hosts:
                if group["id"] == host["clusterRef"]:
                    hosts_ids.append(host["hostRef"])
            group.update({"hosts": hosts_ids})

        return host_groups

    @property
    def current_hosts_in_host_group(self):
        """Return the host references currently associated with the named host group."""
        current_hosts = []
        for group in self.host_groups:
            if group["name"] == self.name:
                current_hosts = group["hosts"]
                break

        return current_hosts

    def unassign_hosts(self, host_list=None):
        """Move the given hosts (default: all hosts in the current group) out of any host group.

        The all-zero group reference is the API's sentinel for "no host group".
        """
        if host_list is None:
            host_list = self.current_host_group["hosts"]

        for host_id in host_list:
            try:
                rc, resp = self.request("storage-systems/%s/hosts/%s/move" % (self.ssid, host_id),
                                        method="POST", data={"group": "0000000000000000000000000000000000000000"})
            except Exception as error:
                self.module.fail_json(msg="Failed to unassign hosts from host group. Array id [%s]. Host id [%s]."
                                          " Error[%s]." % (self.ssid, host_id, to_native(error)))

    def delete_host_group(self, unassign_hosts=True):
        """Delete the current host group, optionally unassigning its hosts first."""
        if unassign_hosts:
            self.unassign_hosts()

        try:
            rc, resp = self.request("storage-systems/%s/host-groups/%s" % (self.ssid, self.current_host_group["id"]), method="DELETE")
        except Exception as error:
            self.module.fail_json(msg="Failed to delete host group. Array id [%s]. Error[%s]." % (self.ssid, to_native(error)))

    def create_host_group(self):
        """Create the host group with the requested hosts and return the API response."""
        data = {"name": self.name, "hosts": self.hosts}

        response = None
        try:
            rc, response = self.request("storage-systems/%s/host-groups" % self.ssid, method="POST", data=data)
        except Exception as error:
            self.module.fail_json(msg="Failed to create host group. Array id [%s]. Error[%s]." % (self.ssid, to_native(error)))

        return response

    def update_host_group(self):
        """Update host group membership to match the requested hosts and return the API response."""
        data = {"name": self.name, "hosts": self.hosts}

        # Unassign hosts that should no longer be part of the host group.
        desired_host_ids = self.hosts
        for host in self.current_hosts_in_host_group:
            if host not in desired_host_ids:
                self.unassign_hosts([host])

        update_response = None
        try:
            rc, update_response = self.request("storage-systems/%s/host-groups/%s" % (self.ssid, self.current_host_group["id"]), method="POST", data=data)
        except Exception as error:
            # Fixed message: this is the update path, not creation.
            self.module.fail_json(msg="Failed to update host group. Array id [%s]. Error[%s]." % (self.ssid, to_native(error)))

        return update_response

    def apply(self):
        """Apply desired host group state to the storage array and exit the module."""
        changes_required = False

        # Search for existing host group match.
        for group in self.host_groups:
            if group["name"] == self.name:
                self.current_host_group = group
                self.current_host_group["hosts"].sort()
                break

        # Determine whether changes are required.
        if self.state == "present":
            if self.current_host_group:
                if self.hosts and self.hosts != self.current_host_group["hosts"]:
                    changes_required = True
            else:
                if not self.name:
                    self.module.fail_json(msg="The option name must be supplied when creating a new host group. Array id [%s]." % self.ssid)
                changes_required = True

        elif self.current_host_group:
            changes_required = True

        # Apply any necessary changes. The default message must be set before the change branch;
        # the original initialized it inside, so no-op runs reported an empty message.
        msg = "No changes required."
        if changes_required and not self.module.check_mode:
            if self.state == "present":
                if self.current_host_group:
                    if self.hosts != self.current_host_group["hosts"]:
                        msg = self.update_host_group()
                else:
                    msg = self.create_host_group()

            elif self.current_host_group:
                self.delete_host_group()
                msg = "Host group deleted. Array Id [%s]. Host group [%s]." % (self.ssid, self.current_host_group["name"])

        self.module.exit_json(msg=msg, changed=changes_required)
+
+
def main():
    """Module entry point: construct the host group manager and apply the requested state."""
    NetAppESeriesHostGroup().apply()


if __name__ == "__main__":
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ib_iser_interface.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ib_iser_interface.py
new file mode 100644
index 00000000..05119eb2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ib_iser_interface.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_ib_iser_interface
+short_description: NetApp E-Series manage InfiniBand iSER interface configuration
+description:
+ - Configure settings of an E-Series InfiniBand iSER interface IPv4 address configuration.
+author:
+ - Michael Price (@lmprice)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ controller:
+ description:
+ - The controller that owns the port you want to configure.
+ - Controller names are presented alphabetically, with the first controller as A, the second as B, and so on.
+ - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard limitation and could change in the future.
+ type: str
+ required: true
+ choices:
+ - A
+ - B
+ channel:
+ description:
+ - The InfiniBand HCA port you wish to modify.
+ - Ports start left to right and start with 1.
+ type: int
+ required: true
+ address:
+ description:
+ - The IPv4 address to assign to the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ type: str
+ required: true
+notes:
+ - Check mode is supported.
+"""
+
+EXAMPLES = """
+ - name: Configure the first port on the A controller with a static IPv4 address
+ na_santricity_ib_iser_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ controller: "A"
+ channel: "1"
+ address: "192.168.1.100"
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The interface settings have been updated.
+enabled:
+ description:
+ - Indicates whether IPv4 connectivity has been enabled or disabled.
+ - This does not necessarily indicate connectivity. If dhcp was enabled without a dhcp server, for instance,
+ it is unlikely that the configuration will actually be valid.
+ returned: on success
+ sample: True
+ type: bool
+"""
+import re
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
class NetAppESeriesIbIserInterface(NetAppESeriesModule):
    """Configure the IPv4 address of an E-Series InfiniBand iSER host interface."""

    def __init__(self):
        # DOCUMENTATION declares channel as required; without required=True a
        # missing channel arrives as None and crashes the range check in
        # get_target_interface (None < 1).
        ansible_options = dict(controller=dict(type="str", required=True, choices=["A", "B"]),
                               channel=dict(type="int", required=True),
                               address=dict(type="str", required=True))

        super(NetAppESeriesIbIserInterface, self).__init__(ansible_options=ansible_options,
                                                           web_services_version="02.00.0000.0000",
                                                           supports_check_mode=True)

        args = self.module.params
        self.controller = args["controller"]
        self.channel = args["channel"]
        self.address = args["address"]
        self.check_mode = self.module.check_mode

        # Memoized result of get_target_interface(); populated on first use.
        self.get_target_interface_cache = None

        # A relatively primitive regex to validate that the input is formatted like a valid ip address
        address_regex = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
        if self.address and not address_regex.match(self.address):
            self.module.fail_json(msg="An invalid ip address was provided for address.")

    def get_interfaces(self):
        """Retrieve all hostside interfaces and return only the IB iSER-capable ones.

        Fails the module when the request errors or no iSER interface exists.
        """
        ifaces = []
        try:
            rc, ifaces = self.request("storage-systems/%s/interfaces?channelType=hostside" % self.ssid)
        except Exception as err:
            self.module.fail_json(msg="Failed to retrieve defined host interfaces. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

        # Filter out non-ib-iser interfaces
        ib_iser_ifaces = []
        for iface in ifaces:
            if ((iface["ioInterfaceTypeData"]["interfaceType"] == "iscsi" and
                 iface["ioInterfaceTypeData"]["iscsi"]["interfaceData"]["type"] == "infiniband" and
                 iface["ioInterfaceTypeData"]["iscsi"]["interfaceData"]["infinibandData"]["isIser"]) or
                    (iface["ioInterfaceTypeData"]["interfaceType"] == "ib" and
                     iface["ioInterfaceTypeData"]["ib"]["isISERSupported"])):
                ib_iser_ifaces.append(iface)

        if not ib_iser_ifaces:
            # Single %s placeholder: the original string had two placeholders
            # ("Array [%s] - %s.") with only one argument, raising TypeError
            # whenever this failure path was reached.
            self.module.fail_json(msg="Failed to detect any InfiniBand iSER interfaces! Array [%s]." % self.ssid)

        return ib_iser_ifaces

    def get_controllers(self):
        """Retrieve a mapping of controller labels to their references
        {
            'A': '070000000000000000000001',
            'B': '070000000000000000000002',
        }
        :return: the controllers defined on the system
        """
        controllers = list()
        try:
            rc, controllers = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/id" % self.ssid)
        except Exception as err:
            self.module.fail_json(msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
                                      % (self.ssid, to_native(err)))

        controllers.sort()

        # Label controllers alphabetically in sorted-reference order: A, B, ...
        controllers_dict = {}
        i = ord('A')
        for controller in controllers:
            label = chr(i)
            controllers_dict[label] = controller
            i += 1

        return controllers_dict

    def get_ib_link_status(self):
        """Determine the infiniband link status. Returns dictionary keyed by interface reference number."""
        link_statuses = {}
        try:
            rc, result = self.request("storage-systems/%s/hardware-inventory" % self.ssid)
            for link in result["ibPorts"]:
                link_statuses.update({link["channelPortRef"]: link["linkState"]})
        except Exception as error:
            self.module.fail_json(msg="Failed to retrieve ib link status information! Array Id [%s]. Error [%s]."
                                      % (self.ssid, to_native(error)))

        return link_statuses

    def get_target_interface(self):
        """Search for the selected IB iSER interface and cache the result.

        Fails the module when the requested channel is outside the range of
        channels available on the chosen controller.
        """
        if self.get_target_interface_cache is None:
            ifaces = self.get_interfaces()
            ifaces_status = self.get_ib_link_status()
            controller_id = self.get_controllers()[self.controller]

            # Collect [channel, interface, link-state] triples for the selected
            # controller so they can be ordered by channel number.
            controller_ifaces = []
            for iface in ifaces:
                if iface["ioInterfaceTypeData"]["interfaceType"] == "iscsi" and iface["ioInterfaceTypeData"]["iscsi"]["controllerId"] == controller_id:
                    controller_ifaces.append([iface["ioInterfaceTypeData"]["iscsi"]["channel"], iface,
                                              ifaces_status[iface["ioInterfaceTypeData"]["iscsi"]["channelPortRef"]]])
                elif iface["ioInterfaceTypeData"]["interfaceType"] == "ib" and iface["ioInterfaceTypeData"]["ib"]["controllerId"] == controller_id:
                    controller_ifaces.append([iface["ioInterfaceTypeData"]["ib"]["channel"], iface,
                                              iface["ioInterfaceTypeData"]["ib"]["linkState"]])

            sorted_controller_ifaces = sorted(controller_ifaces)
            if self.channel < 1 or self.channel > len(controller_ifaces):
                status_msg = ", ".join(["%s (link %s)" % (index + 1, values[2])
                                        for index, values in enumerate(sorted_controller_ifaces)])
                self.module.fail_json(msg="Invalid controller %s HCA channel. Available channels: %s, Array Id [%s]."
                                          % (self.controller, status_msg, self.ssid))

            self.get_target_interface_cache = sorted_controller_ifaces[self.channel - 1][1]
        return self.get_target_interface_cache

    def is_change_required(self):
        """Determine whether change is required."""
        iface = self.get_target_interface()
        # NOTE(review): the iscsi branch reads iface["iscsi"][...] while every
        # other access in this module goes through iface["ioInterfaceTypeData"]
        # ["iscsi"] -- presumably the REST object exposes both paths; confirm
        # against the Web Services interface schema before relying on it.
        if ((iface["ioInterfaceTypeData"]["interfaceType"] == "iscsi" and iface["iscsi"]["ipv4Data"]["ipv4AddressData"]["ipv4Address"] != self.address) or
                (iface["ioInterfaceTypeData"]["interfaceType"] == "ib" and
                 iface["commandProtocolPropertiesList"]["commandProtocolProperties"][0]["scsiProperties"]["scsiProtocolType"] == "iser")):
            return True

        return False

    def make_request_body(self):
        """Build the symbol setIscsiInterfaceProperties payload.

        Only ipv4Address is populated; every other setting is an empty list,
        which leaves that property unchanged on the array.
        """
        iface = self.get_target_interface()
        body = {"iscsiInterface": iface["ioInterfaceTypeData"][iface["ioInterfaceTypeData"]["interfaceType"]]["id"],
                "settings": {"tcpListenPort": [],
                             "ipv4Address": [self.address],
                             "ipv4SubnetMask": [],
                             "ipv4GatewayAddress": [],
                             "ipv4AddressConfigMethod": [],
                             "maximumFramePayloadSize": [],
                             "ipv4VlanId": [],
                             "ipv4OutboundPacketPriority": [],
                             "ipv4Enabled": [],
                             "ipv6Enabled": [],
                             "ipv6LocalAddresses": [],
                             "ipv6RoutableAddresses": [],
                             "ipv6PortRouterAddress": [],
                             "ipv6AddressConfigMethod": [],
                             "ipv6OutboundPacketPriority": [],
                             "ipv6VlanId": [],
                             "ipv6HopLimit": [],
                             "ipv6NdReachableTime": [],
                             "ipv6NdRetransmitTime": [],
                             "ipv6NdStaleTimeout": [],
                             "ipv6DuplicateAddressDetectionAttempts": [],
                             "maximumInterfaceSpeed": []}}
        return body

    def update(self):
        """Make any necessary updates and exit the module."""
        update_required = self.is_change_required()
        if update_required and not self.check_mode:
            try:
                rc, result = self.request("storage-systems/%s/symbol/setIscsiInterfaceProperties"
                                          % self.ssid, method="POST", data=self.make_request_body())
            except Exception as error:
                self.module.fail_json(msg="Failed to modify the interface! Array Id [%s]. Error [%s]."
                                          % (self.ssid, to_native(error)))
            self.module.exit_json(msg="The interface settings have been updated.", changed=update_required)

        self.module.exit_json(msg="No changes were required.", changed=update_required)
+
+
def main():
    """Module entry point: build the IB iSER interface handler and run it."""
    NetAppESeriesIbIserInterface().update()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_interface.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_interface.py
new file mode 100644
index 00000000..6da22918
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_interface.py
@@ -0,0 +1,342 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_iscsi_interface
+short_description: NetApp E-Series manage iSCSI interface configuration
+description:
+ - Configure settings of an E-Series iSCSI interface
+author:
+ - Michael Price (@lmprice)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ controller:
+ description:
+ - The controller that owns the port you want to configure.
+ - Controller names are presented alphabetically, with the first controller as A,
+ the second as B, and so on.
+ - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard
+ limitation and could change in the future.
+ type: str
+ required: true
+ choices:
+ - A
+ - B
+ port:
+ description:
+ - The controller iSCSI HIC port to modify.
+ - You can determine this value by numbering the iSCSI ports left to right on the controller you wish to modify starting with one.
+ type: int
+ required: true
+ state:
+ description:
+ - When enabled, the provided configuration will be utilized.
+ - When disabled, the IPv4 configuration will be cleared and IPv4 connectivity disabled.
+ type: str
+ choices:
+ - enabled
+ - disabled
+ default: enabled
+ address:
+ description:
+ - The IPv4 address to assign to the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: false
+ subnet_mask:
+ description:
+ - The subnet mask to utilize for the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ gateway:
+ description:
+ - The IPv4 gateway address to utilize for the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: false
+ config_method:
+ description:
+ - The configuration method type to use for this interface.
+ - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway).
+ type: str
+ choices:
+ - dhcp
+ - static
+ default: dhcp
+ required: false
+ mtu:
+ description:
+ - The maximum transmission units (MTU), in bytes.
+ - This allows you to configure a larger value for the MTU, in order to enable jumbo frames
+ (any value > 1500).
+ - Generally, it is necessary to have your host, switches, and other components not only support jumbo
+ frames, but also have it configured properly. Therefore, unless you know what you're doing, it's best to
+ leave this at the default.
+ type: int
+ default: 1500
+ required: false
+ aliases:
+ - max_frame_size
+notes:
+ - Check mode is supported.
+ - The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address
+ via dhcp, etc), can take seconds or minutes longer to take effect.
+ - This module will not be useful/usable on an E-Series system without any iSCSI interfaces.
+ - This module requires a Web Services API version of >= 1.3.
+"""
+
+EXAMPLES = """
+ - name: Configure the first port on the A controller with a static IPv4 address
+ na_santricity_iscsi_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ port: "1"
+ controller: "A"
+ config_method: static
+ address: "192.168.1.100"
+ subnet_mask: "255.255.255.0"
+ gateway: "192.168.1.1"
+
+ - name: Disable ipv4 connectivity for the second port on the B controller
+ na_santricity_iscsi_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ port: "2"
+ controller: "B"
+ state: disabled
+
+ - name: Enable jumbo frames for the first 4 ports on controller A
+ na_santricity_iscsi_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ port: "{{ item }}"
+ controller: "A"
+ state: enabled
+ mtu: 9000
+ config_method: dhcp
+ loop:
+ - 1
+ - 2
+ - 3
+ - 4
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The interface settings have been updated.
+"""
+import re
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
class NetAppESeriesIscsiInterface(NetAppESeriesModule):
    """Configure an E-Series controller iSCSI HIC port (IPv4 settings, MTU)."""

    def __init__(self):
        ansible_options = dict(controller=dict(type="str", required=True, choices=["A", "B"]),
                               port=dict(type="int", required=True),
                               state=dict(type="str", required=False, default="enabled", choices=["enabled", "disabled"]),
                               address=dict(type="str", required=False),
                               subnet_mask=dict(type="str", required=False),
                               gateway=dict(type="str", required=False),
                               config_method=dict(type="str", required=False, default="dhcp", choices=["dhcp", "static"]),
                               mtu=dict(type="int", default=1500, required=False, aliases=["max_frame_size"]))

        required_if = [["config_method", "static", ["address", "subnet_mask"]]]
        super(NetAppESeriesIscsiInterface, self).__init__(ansible_options=ansible_options,
                                                          web_services_version="02.00.0000.0000",
                                                          required_if=required_if,
                                                          supports_check_mode=True)

        args = self.module.params
        self.controller = args["controller"]
        self.port = args["port"]
        self.mtu = args["mtu"]
        self.state = args["state"]
        self.address = args["address"]
        self.subnet_mask = args["subnet_mask"]
        self.gateway = args["gateway"]
        self.config_method = args["config_method"]

        self.check_mode = self.module.check_mode
        self.post_body = dict()
        self.controllers = list()
        # Memoized result of get_target_interface(); populated on first use.
        self.get_target_interface_cache = None

        # Accepted range is 1500..9000 inclusive; the previous error message
        # incorrectly described it as an exclusive range ("> 1500 and < 9000").
        if self.mtu < 1500 or self.mtu > 9000:
            self.module.fail_json(msg="The provided mtu is invalid, it must be between 1500 and 9000 bytes (inclusive).")

        if self.config_method == "dhcp" and any([self.address, self.subnet_mask, self.gateway]):
            self.module.fail_json(msg="A config_method of dhcp is mutually exclusive with the address,"
                                      " subnet_mask, and gateway options.")

        # A relatively primitive regex to validate that the input is formatted like a valid ip address
        address_regex = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")

        if self.address and not address_regex.match(self.address):
            self.module.fail_json(msg="An invalid ip address was provided for address.")

        if self.subnet_mask and not address_regex.match(self.subnet_mask):
            self.module.fail_json(msg="An invalid ip address was provided for subnet_mask.")

        if self.gateway and not address_regex.match(self.gateway):
            self.module.fail_json(msg="An invalid ip address was provided for gateway.")

    @property
    def interfaces(self):
        """Return all ethernet-based iSCSI host interfaces on the array."""
        ifaces = list()
        try:
            rc, ifaces = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/hostInterfaces" % self.ssid)
        except Exception as err:
            self.module.fail_json(msg="Failed to retrieve defined host interfaces. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

        # Filter out non-iSCSI interfaces
        iscsi_interfaces = []
        for iface in [iface for iface in ifaces if iface["interfaceType"] == "iscsi"]:
            if iface["iscsi"]["interfaceData"]["type"] == "ethernet":
                iscsi_interfaces.append(iface)

        return iscsi_interfaces

    def get_controllers(self):
        """Retrieve a mapping of controller labels to their references
        {
            "A": "070000000000000000000001",
            "B": "070000000000000000000002",
        }
        :return: the controllers defined on the system
        """
        controllers = list()
        try:
            rc, controllers = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/id" % self.ssid)
        except Exception as err:
            self.module.fail_json(msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

        controllers.sort()

        # Label controllers alphabetically in sorted-reference order: A, B, ...
        controllers_dict = {}
        i = ord("A")
        for controller in controllers:
            label = chr(i)
            controllers_dict[label] = controller
            i += 1

        return controllers_dict

    def get_target_interface(self):
        """Retrieve the specific controller iSCSI interface; fail when the port
        number is outside the range available on the chosen controller."""
        if self.get_target_interface_cache is None:
            ifaces = self.interfaces

            # Collect [channel, interface, link-status] triples for the
            # selected controller so they can be ordered by channel number.
            controller_ifaces = []
            for iface in ifaces:
                if self.controllers[self.controller] == iface["iscsi"]["controllerId"]:
                    controller_ifaces.append([iface["iscsi"]["channel"], iface, iface["iscsi"]["interfaceData"]["ethernetData"]["linkStatus"]])

            sorted_controller_ifaces = sorted(controller_ifaces)
            if self.port < 1 or self.port > len(controller_ifaces):
                status_msg = ", ".join(["%s (link %s)" % (index + 1, values[2]) for index, values in enumerate(sorted_controller_ifaces)])
                self.module.fail_json(msg="Invalid controller %s iSCSI port. Available ports: %s, Array Id [%s]."
                                          % (self.controller, status_msg, self.ssid))

            self.get_target_interface_cache = sorted_controller_ifaces[self.port - 1][1]
        return self.get_target_interface_cache

    def make_update_body(self, target_iface):
        """Compare the current interface state with the requested settings.

        :param target_iface: interface object as returned by get_target_interface
        :return: (update_required, request_body) tuple
        """
        target_iface = target_iface["iscsi"]
        body = dict(iscsiInterface=target_iface["id"])
        update_required = False

        if self.state == "enabled":
            settings = dict()
            if not target_iface["ipv4Enabled"]:
                update_required = True
                settings["ipv4Enabled"] = [True]
            if self.mtu != target_iface["interfaceData"]["ethernetData"]["maximumFramePayloadSize"]:
                update_required = True
                settings["maximumFramePayloadSize"] = [self.mtu]
            if self.config_method == "static":
                ipv4Data = target_iface["ipv4Data"]["ipv4AddressData"]

                if ipv4Data["ipv4Address"] != self.address:
                    update_required = True
                    settings["ipv4Address"] = [self.address]
                if ipv4Data["ipv4SubnetMask"] != self.subnet_mask:
                    update_required = True
                    settings["ipv4SubnetMask"] = [self.subnet_mask]
                if self.gateway is not None and ipv4Data["ipv4GatewayAddress"] != self.gateway:
                    update_required = True
                    settings["ipv4GatewayAddress"] = [self.gateway]

                if target_iface["ipv4Data"]["ipv4AddressConfigMethod"] != "configStatic":
                    update_required = True
                    settings["ipv4AddressConfigMethod"] = ["configStatic"]

            elif target_iface["ipv4Data"]["ipv4AddressConfigMethod"] != "configDhcp":
                update_required = True
                settings.update(dict(ipv4Enabled=[True],
                                     ipv4AddressConfigMethod=["configDhcp"]))
            body["settings"] = settings

        else:
            # state == "disabled": only turn IPv4 off when currently enabled.
            if target_iface["ipv4Enabled"]:
                update_required = True
                body["settings"] = dict(ipv4Enabled=[False])

        return update_required, body

    def update(self):
        """Apply the requested interface configuration and exit the module."""
        self.controllers = self.get_controllers()
        if self.controller not in self.controllers:
            self.module.fail_json(msg="The provided controller name is invalid. Valid controllers: %s." % ", ".join(self.controllers.keys()))

        iface_before = self.get_target_interface()
        update_required, body = self.make_update_body(iface_before)
        if update_required and not self.check_mode:
            try:
                rc, result = self.request("storage-systems/%s/symbol/setIscsiInterfaceProperties" % self.ssid, method="POST", data=body, ignore_errors=True)
                # We could potentially retry this a few times, but it's probably a rare enough case (unless a playbook
                # is cancelled mid-flight), that it isn't worth the complexity.
                if rc == 422 and result["retcode"] in ["busy", "3"]:
                    self.module.fail_json(msg="The interface is currently busy (probably processing a previously requested modification request)."
                                              " This operation cannot currently be completed. Array Id [%s]. Error [%s]." % (self.ssid, result))
                # Handle authentication issues, etc.
                elif rc != 200:
                    self.module.fail_json(msg="Failed to modify the interface! Array Id [%s]. Error [%s]." % (self.ssid, to_native(result)))
            # This is going to catch cases like a connection failure
            except Exception as err:
                self.module.fail_json(msg="Connection failure: we failed to modify the interface! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

        self.module.exit_json(msg="The interface settings have been updated.", changed=update_required)
+
+
def main():
    """Module entry point: build the iSCSI interface handler and run it."""
    NetAppESeriesIscsiInterface().update()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_target.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_target.py
new file mode 100644
index 00000000..46a93d2e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_target.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_iscsi_target
+short_description: NetApp E-Series manage iSCSI target configuration
+description:
+ - Configure the settings of an E-Series iSCSI target
+author:
+ - Michael Price (@lmprice)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ name:
+ description:
+ - The name/alias to assign to the iSCSI target.
+ - This alias is often used by the initiator software in order to make an iSCSI target easier to identify.
+ type: str
+ required: false
+ aliases:
+ - alias
+ ping:
+ description:
+ - Enable ICMP ping responses from the configured iSCSI ports.
+ type: bool
+ default: true
+ required: false
+ chap_secret:
+ description:
+ - Enable Challenge-Handshake Authentication Protocol (CHAP), utilizing this value as the password.
+ - When this value is specified, we will always trigger an update (changed=True). We have no way of verifying
+ whether or not the password has changed.
+ - The chap secret may only use ascii characters with values between 32 and 126 decimal.
+ - The chap secret must be no less than 12 characters, but no greater than 57 characters in length.
+ - The chap secret is cleared when not specified or an empty string.
+ type: str
+ required: false
+ aliases:
+ - chap
+ - password
+ unnamed_discovery:
+ description:
+ - When an initiator initiates a discovery session to an initiator port, it is considered an unnamed
+ discovery session if the iSCSI target iqn is not specified in the request.
+ - This option may be disabled to increase security if desired.
+ type: bool
+ default: true
+ required: false
+notes:
+ - Check mode is supported.
+ - Some of the settings are dependent on the settings applied to the iSCSI interfaces. These can be configured using
+ M(netapp_eseries.santricity.na_santricity_iscsi_interface).
+ - This module requires a Web Services API version of >= 1.3.
+"""
+
+EXAMPLES = """
+ - name: Enable ping responses and unnamed discovery sessions for all iSCSI ports
+ na_santricity_iscsi_target:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ name: myTarget
+ ping: true
+ unnamed_discovery: true
+
+ - name: Set the target alias and the CHAP secret
+ na_santricity_iscsi_target:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ name: myTarget
+ chap: password1234
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The iSCSI target settings have been updated.
+alias:
+ description:
+ - The alias assigned to the iSCSI target.
+ returned: on success
+ sample: myArray
+ type: str
+iqn:
+ description:
+ - The iqn (iSCSI Qualified Name), assigned to the iSCSI target.
+ returned: on success
+ sample: iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45
+ type: str
+"""
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
# Default JSON request/response headers.
# NOTE(review): not referenced anywhere in this module's visible code --
# presumably kept for parity with sibling modules; confirm before removing.
HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}
+
+
class NetAppESeriesIscsiTarget(NetAppESeriesModule):
    """Configure the E-Series iSCSI target (alias, CHAP, ping, discovery)."""

    def __init__(self):
        ansible_options = dict(name=dict(type="str", required=False, aliases=["alias"]),
                               ping=dict(type="bool", required=False, default=True),
                               chap_secret=dict(type="str", required=False, aliases=["chap", "password"], no_log=True),
                               unnamed_discovery=dict(type="bool", required=False, default=True))

        super(NetAppESeriesIscsiTarget, self).__init__(ansible_options=ansible_options,
                                                       web_services_version="02.00.0000.0000",
                                                       supports_check_mode=True)

        args = self.module.params

        self.name = args["name"]
        self.ping = args["ping"]
        self.chap_secret = args["chap_secret"]
        self.unnamed_discovery = args["unnamed_discovery"]

        self.check_mode = self.module.check_mode
        self.post_body = dict()
        self.controllers = list()

        # CHAP constraints documented in DOCUMENTATION: 12-57 characters,
        # printable ASCII (decimal 32-126) only.
        if self.chap_secret:
            if len(self.chap_secret) < 12 or len(self.chap_secret) > 57:
                self.module.fail_json(msg="The provided CHAP secret is not valid, it must be between 12 and 57"
                                          " characters in length.")

            for c in self.chap_secret:
                ordinal = ord(c)
                if ordinal < 32 or ordinal > 126:
                    self.module.fail_json(msg="The provided CHAP secret is not valid, it may only utilize ascii"
                                              " characters with decimal values between 32 and 126.")

    @property
    def target(self):
        """Provide information on the iSCSI Target configuration

        Sample:
        {
          "alias": "myCustomName",
          "ping": True,
          "unnamed_discovery": True,
          "chap": False,
          "iqn": "iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45",
        }
        """
        target = dict()
        try:
            rc, data = self.request("storage-systems/%s/graph/xpath-filter?query=/storagePoolBundle/target" % self.ssid)
            # This likely isn"t an iSCSI-enabled system
            if not data:
                self.module.fail_json(msg="This storage-system does not appear to have iSCSI interfaces. Array Id [%s]." % self.ssid)

            data = data[0]
            # CHAP is considered enabled when any configured auth method is chap.
            chap = any([auth for auth in data["configuredAuthMethods"]["authMethodData"] if auth["authMethod"] == "chap"])
            target.update(dict(alias=data["alias"]["iscsiAlias"], iqn=data["nodeName"]["iscsiNodeName"], chap=chap))

            rc, data = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/iscsiEntityData" % self.ssid)

            data = data[0]
            target.update(dict(ping=data["icmpPingResponseEnabled"], unnamed_discovery=data["unnamedDiscoverySessionsEnabled"]))

        except Exception as err:
            self.module.fail_json(msg="Failed to retrieve the iSCSI target information. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

        return target

    def apply_iscsi_settings(self):
        """Update the iSCSI target alias and CHAP settings.

        :return: True when a change was required
        """
        update = False
        target = self.target

        body = dict()

        if self.name is not None and self.name != target["alias"]:
            update = True
            body["alias"] = self.name

        # If the CHAP secret was provided, we trigger an update.
        if self.chap_secret:
            update = True
            body.update(dict(enableChapAuthentication=True,
                             chapSecret=self.chap_secret))
        # If no secret was provided, then we disable chap
        elif target["chap"]:
            update = True
            body.update(dict(enableChapAuthentication=False))

        if update and not self.check_mode:
            try:
                self.request("storage-systems/%s/iscsi/target-settings" % self.ssid, method="POST", data=body)
            except Exception as err:
                self.module.fail_json(msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

        return update

    def apply_target_changes(self):
        """Update ping response and unnamed discovery settings.

        :return: True when a change was required
        """
        update = False
        target = self.target

        body = dict()

        if self.ping != target["ping"]:
            update = True
            body["icmpPingResponseEnabled"] = self.ping

        if self.unnamed_discovery != target["unnamed_discovery"]:
            update = True
            body["unnamedDiscoverySessionsEnabled"] = self.unnamed_discovery

        if update and not self.check_mode:
            try:
                self.request("storage-systems/%s/iscsi/entity" % self.ssid, method="POST", data=body)
            except Exception as err:
                self.module.fail_json(msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
        return update

    def update(self):
        """Apply all requested target changes and exit the module."""
        update = self.apply_iscsi_settings()
        update = self.apply_target_changes() or update

        target = self.target
        data = dict((key, target[key]) for key in target if key in ["iqn", "alias"])

        # Message matches the sample documented in this module's RETURN block;
        # the previous text incorrectly referred to "interface settings".
        self.module.exit_json(msg="The iSCSI target settings have been updated.", changed=update, **data)
+
+
def main():
    """Module entry point: build the iSCSI target handler and run it."""
    NetAppESeriesIscsiTarget().update()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ldap.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ldap.py
new file mode 100644
index 00000000..ab2b2ae3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ldap.py
@@ -0,0 +1,391 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_ldap
+short_description: NetApp E-Series manage LDAP integration to use for authentication
+description:
+ - Configure an E-Series system to allow authentication via an LDAP server
+author:
+ - Michael Price (@lmprice)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - When I(state=="present") the defined LDAP domain will be added to the storage system.
+ - When I(state=="absent") the domain specified will be removed from the storage system.
+ - I(state=="disabled") will result in deleting all existing LDAP domains on the storage system.
+ type: str
+ choices:
+ - present
+ - absent
+ - disabled
+ default: present
+ identifier:
+ description:
+ - This is a unique identifier for the configuration (for cases where there are multiple domains configured).
+ type: str
+ default: "default"
+ required: false
+ bind_user:
+ description:
+ - This is the user account that will be used for querying the LDAP server.
+ - Required when I(bind_password) is specified.
+ - "Example: CN=MyBindAcct,OU=ServiceAccounts,DC=example,DC=com"
+ type: str
+ required: false
+ bind_password:
+ description:
+ - This is the password for the bind user account.
+ - Required when I(bind_user) is specified.
+ type: str
+ required: false
+ server_url:
+ description:
+ - This is the LDAP server url.
+ - The connection string should be specified as using the ldap or ldaps protocol along with the port information.
+ type: str
+ required: false
+ names:
+ description:
+ - The domain name[s] that will be utilized when authenticating to identify which domain to utilize.
+      - Defaults to the DNS name of the I(server_url) host when not specified.
+ - The only requirement is that the name[s] be resolvable.
+ - "Example: user@example.com"
+ type: list
+ required: false
+ search_base:
+ description:
+ - The search base is used to find group memberships of the user.
+ - "Example: ou=users,dc=example,dc=com"
+ type: str
+ required: false
+ role_mappings:
+ description:
+ - This is where you specify which groups should have access to what permissions for the
+ storage-system.
+ - For example, all users in group A will be assigned all 4 available roles, which will allow access
+ to all the management functionality of the system (super-user). Those in group B only have the
+ storage.monitor role, which will allow only read-only access.
+ - This is specified as a mapping of regular expressions to a list of roles. See the examples.
+      - The roles that will be assigned to the group/groups matching the provided regex.
+ - storage.admin allows users full read/write access to storage objects and operations.
+ - storage.monitor allows users read-only access to storage objects and operations.
+ - support.admin allows users access to hardware, diagnostic information, the Major Event
+ Log, and other critical support-related functionality, but not the storage configuration.
+ - security.admin allows users access to authentication/authorization configuration, as well
+ as the audit log configuration, and certification management.
+ type: dict
+ required: false
+ group_attributes:
+ description:
+ - The user attributes that should be considered for the group to role mapping.
+      - Typically this is used with something like "memberOf", and a user's access is tested against group
+        membership or lack thereof.
+ type: list
+ default: ["memberOf"]
+ required: false
+ user_attribute:
+ description:
+ - This is the attribute we will use to match the provided username when a user attempts to
+ authenticate.
+ type: str
+ default: "sAMAccountName"
+ required: false
+notes:
+ - Check mode is supported
+ - This module allows you to define one or more LDAP domains identified uniquely by I(identifier) to use for
+ authentication. Authorization is determined by I(role_mappings), in that different groups of users may be given
+ different (or no), access to certain aspects of the system and API.
+ - The local user accounts will still be available if the LDAP server becomes unavailable/inaccessible.
+      - Generally, you'll need to get the details of your organization's LDAP server before you'll be able to configure
+ the system for using LDAP authentication; every implementation is likely to be very different.
+ - This API is currently only supported with the Embedded Web Services API v2.0 and higher, or the Web Services Proxy
+ v3.0 and higher.
+"""
+
+EXAMPLES = """
+ - name: Disable LDAP authentication
+ na_santricity_ldap:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: absent
+
+ - name: Remove the "default" LDAP domain configuration
+ na_santricity_ldap:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: absent
+ identifier: default
+
+ - name: Define a new LDAP domain, utilizing defaults where possible
+ na_santricity_ldap:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+    state: present
+    bind_user: "CN=MyBindAccount,OU=ServiceAccounts,DC=example,DC=com"
+    bind_password: "mySecretPass"
+    server_url: "ldap://example.com:389"
+ search_base: "OU=Users,DC=example,DC=com"
+ role_mappings:
+ ".*dist-dev-storage.*":
+ - storage.admin
+ - security.admin
+ - support.admin
+ - storage.monitor
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The ldap settings have been updated.
+"""
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+
+
+class NetAppESeriesLdap(NetAppESeriesModule):
+ NO_CHANGE_MSG = "No changes were necessary."
+ TEMPORARY_DOMAIN = "ANSIBLE_TMP_DOMAIN"
+
+ def __init__(self):
+ ansible_options = dict(state=dict(type="str", required=False, default="present", choices=["present", "absent", "disabled"]),
+ identifier=dict(type="str", required=False, default="default"),
+ bind_user=dict(type="str", required=False),
+ bind_password=dict(type="str", required=False, no_log=True),
+ names=dict(type="list", required=False),
+ server_url=dict(type="str", required=False),
+ search_base=dict(type="str", required=False),
+ role_mappings=dict(type="dict", required=False, no_log=True),
+ group_attributes=dict(type="list", default=["memberOf"], required=False),
+ user_attribute=dict(type="str", required=False, default="sAMAccountName"))
+
+ required_if = [["state", "present", ["server_url"]]]
+ required_together = [["bind_user", "bind_password"]]
+ super(NetAppESeriesLdap, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ required_if=required_if,
+ required_together=required_together,
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.state = args["state"]
+ self.id = args["identifier"]
+ self.bind_user = args["bind_user"]
+ self.bind_password = args["bind_password"]
+ self.names = args["names"]
+ self.server = args["server_url"]
+ self.search_base = args["search_base"]
+ self.role_mappings = args["role_mappings"]
+ self.group_attributes = args["group_attributes"]
+ self.user_attribute = args["user_attribute"]
+
+ if self.server and not self.names:
+ parts = urlparse.urlparse(self.server)
+ self.names = [parts.netloc.split(':')[0]]
+
+ # Check whether request needs to be forwarded on to the controller web services rest api.
+ self.url_path_prefix = ""
+ if self.is_embedded():
+ self.url_path_prefix = "storage-systems/1/"
+ elif self.ssid != "0" and self.ssid != "proxy":
+ self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/storage-systems/1/" % self.ssid
+
+ self.existing_domain_ids = []
+ self.domain = {} # Existing LDAP domain
+ self.body = {} # Request body
+
+ def get_domains(self):
+ """Retrieve all domain information from storage system."""
+ domains = None
+ try:
+ rc, response = self.request(self.url_path_prefix + "ldap")
+ domains = response["ldapDomains"]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve current LDAP configuration. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ return domains
+
+ def build_request_body(self):
+ """Build the request body."""
+ self.body.update({"id": self.id, "groupAttributes": self.group_attributes, "ldapUrl": self.server, "names": self.names, "roleMapCollection": []})
+
+ if self.search_base:
+ self.body.update({"searchBase": self.search_base})
+ if self.user_attribute:
+ self.body.update({"userAttribute": self.user_attribute})
+ if self.bind_user and self.bind_password:
+ self.body.update({"bindLookupUser": {"password": self.bind_password, "user": self.bind_user}})
+ if self.role_mappings:
+ for regex, names in self.role_mappings.items():
+ for name in names:
+ self.body["roleMapCollection"].append({"groupRegex": regex, "ignorecase": True, "name": name})
+
+ def are_changes_required(self):
+ """Determine whether any changes are required and build request body."""
+ change_required = False
+ domains = self.get_domains()
+
+ if self.state == "disabled" and domains:
+ self.existing_domain_ids = [domain["id"] for domain in domains]
+ change_required = True
+
+ elif self.state == "present":
+ for domain in domains:
+ if self.id == domain["id"]:
+ self.domain = domain
+
+ if self.state == "absent":
+ change_required = True
+ elif (len(self.group_attributes) != len(domain["groupAttributes"]) or
+ any([a not in domain["groupAttributes"] for a in self.group_attributes])):
+ change_required = True
+ elif self.user_attribute != domain["userAttribute"]:
+ change_required = True
+ elif self.search_base.lower() != domain["searchBase"].lower():
+ change_required = True
+ elif self.server != domain["ldapUrl"]:
+ change_required = True
+ elif any(name not in domain["names"] for name in self.names) or any(name not in self.names for name in domain["names"]):
+ change_required = True
+ elif self.role_mappings:
+ if len(self.body["roleMapCollection"]) != len(domain["roleMapCollection"]):
+ change_required = True
+ else:
+ for role_map in self.body["roleMapCollection"]:
+ for existing_role_map in domain["roleMapCollection"]:
+ if role_map["groupRegex"] == existing_role_map["groupRegex"] and role_map["name"] == existing_role_map["name"]:
+ break
+ else:
+ change_required = True
+
+ if not change_required and self.bind_user and self.bind_password:
+ if self.bind_user != domain["bindLookupUser"]["user"]:
+ change_required = True
+ elif self.bind_password:
+ temporary_domain = None
+ try:
+ # Check whether temporary domain exists
+ if any(domain["id"] == self.TEMPORARY_DOMAIN for domain in domains):
+ self.delete_domain(self.TEMPORARY_DOMAIN)
+
+ temporary_domain = self.add_domain(temporary=True, skip_test=True)
+ rc, tests = self.request(self.url_path_prefix + "ldap/test", method="POST")
+
+ temporary_domain_test = {}
+ domain_test = {}
+ for test in tests:
+ if test["id"] == temporary_domain["id"]:
+ temporary_domain_test = test["result"]
+ if self.id == test["id"]:
+ domain_test = test["result"]
+
+ if temporary_domain_test["authenticationTestResult"] == "ok" and domain_test["authenticationTestResult"] != "ok":
+ change_required = True
+ elif temporary_domain_test["authenticationTestResult"] != "ok":
+ self.module.fail_json(msg="Failed to authenticate bind credentials! Array Id [%s]." % self.ssid)
+
+ finally:
+ if temporary_domain:
+ self.delete_domain(self.TEMPORARY_DOMAIN)
+ break
+ else:
+ change_required = True
+ elif self.state == "absent":
+ for domain in domains:
+ if self.id == domain["id"]:
+ change_required = True
+
+ return change_required
+
+ def add_domain(self, temporary=False, skip_test=False):
+ """Add domain to storage system."""
+ domain = None
+ body = self.body.copy()
+ if temporary:
+ body.update({"id": self.TEMPORARY_DOMAIN, "names": [self.TEMPORARY_DOMAIN]})
+
+ try:
+ rc, response = self.request(self.url_path_prefix + "ldap/addDomain?skipTest=%s" % ("true" if not skip_test else "false"),
+ method="POST", data=body)
+ domain = response["ldapDomains"][0]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create LDAP domain. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ return domain
+
+ def update_domain(self):
+ """Update existing domain on storage system."""
+ try:
+ rc, response = self.request(self.url_path_prefix + "ldap/%s" % self.domain["id"], method="POST", data=self.body)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update LDAP domain. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def delete_domain(self, domain_id):
+ """Delete specific domain on the storage system."""
+ try:
+ url = self.url_path_prefix + "ldap/%s" % domain_id
+ rc, response = self.request(self.url_path_prefix + "ldap/%s" % domain_id, method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to delete LDAP domain. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def disable_domains(self):
+ """Delete all existing domains on storage system."""
+ for domain_id in self.existing_domain_ids:
+ self.delete_domain(domain_id)
+
+ def apply(self):
+ """Apply any necessary changes to the LDAP configuration."""
+ self.build_request_body()
+ change_required = self.are_changes_required()
+
+ if change_required and not self.module.check_mode:
+ if self.state == "present":
+ if self.domain:
+ self.update_domain()
+ self.module.exit_json(msg="LDAP domain has been updated. Array Id: [%s]" % self.ssid, changed=change_required)
+ else:
+ self.add_domain()
+ self.module.exit_json(msg="LDAP domain has been added. Array Id: [%s]" % self.ssid, changed=change_required)
+ elif self.state == "absent":
+ if self.domain:
+ self.delete_domain(self.domain["id"])
+ self.module.exit_json(msg="LDAP domain has been removed. Array Id: [%s]" % self.ssid, changed=change_required)
+ else:
+ self.disable_domains()
+ self.module.exit_json(msg="All LDAP domains have been removed. Array Id: [%s]" % self.ssid, changed=change_required)
+
+ self.module.exit_json(msg="No changes have been made to the LDAP configuration. Array Id: [%s]" % self.ssid, changed=change_required)
+
+
+def main():
+    """Module entry point: instantiate the LDAP handler and apply the configuration."""
+    ldap = NetAppESeriesLdap()
+    ldap.apply()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_lun_mapping.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_lun_mapping.py
new file mode 100644
index 00000000..d3d70fb5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_lun_mapping.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: na_santricity_lun_mapping
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+short_description: NetApp E-Series manage lun mappings
+description:
+ - Create, delete, or modify mappings between a volume and a targeted host/host+ group.
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - Present will ensure the mapping exists, absent will remove the mapping.
+ type: str
+ required: False
+ choices: ["present", "absent"]
+ default: "present"
+ target:
+ description:
+ - The name of host or hostgroup you wish to assign to the mapping
+ - If omitted, the default hostgroup is used.
+ - If the supplied I(volume_name) is associated with a different target, it will be updated to what is supplied here.
+ type: str
+ required: False
+ volume_name:
+ description:
+ - The name of the volume you wish to include in the mapping.
+ - Use ACCESS_VOLUME to reference the in-band access management volume.
+ type: str
+ required: True
+ aliases:
+ - volume
+ lun:
+ description:
+ - The LUN value you wish to give the mapping.
+ - If the supplied I(volume_name) is associated with a different LUN, it will be updated to what is supplied here.
+      - LUN value will be determined by the storage-system when not specified.
+ type: int
+ required: false
+'''
+
+EXAMPLES = '''
+---
+ - name: Map volume1 to the host target host1
+ na_santricity_lun_mapping:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: present
+ target: host1
+ volume: volume1
+ - name: Delete the lun mapping between volume1 and host1
+ na_santricity_lun_mapping:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ state: absent
+ target: host1
+ volume: volume1
+'''
+RETURN = '''
+msg:
+ description: success of the module
+ returned: always
+ type: str
+ sample: Lun mapping is complete
+'''
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesLunMapping(NetAppESeriesModule):
+ def __init__(self):
+ ansible_options = dict(state=dict(required=False, choices=["present", "absent"], default="present"),
+ target=dict(required=False, default=None),
+ volume_name=dict(required=True, aliases=["volume"]),
+ lun=dict(type="int", required=False))
+
+ super(NetAppESeriesLunMapping, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True)
+
+ args = self.module.params
+ self.state = args["state"] == "present"
+ self.target = args["target"] if args["target"] else "DEFAULT_HOSTGROUP"
+ self.volume = args["volume_name"] if args["volume_name"] != "ACCESS_VOLUME" else "Access"
+ self.lun = args["lun"]
+ self.check_mode = self.module.check_mode
+ self.mapping_info = None
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ def update_mapping_info(self):
+ """Collect the current state of the storage array."""
+ response = None
+ try:
+ rc, response = self.request("storage-systems/%s/graph" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve storage array graph. Id [%s]. Error [%s]" % (self.ssid, to_native(error)))
+
+ # Create dictionary containing host/cluster references mapped to their names
+ target_reference = {}
+ target_name = {}
+ target_type = {}
+
+ for host in response["storagePoolBundle"]["host"]:
+ target_reference.update({host["hostRef"]: host["name"]})
+ target_name.update({host["name"]: host["hostRef"]})
+ target_type.update({host["name"]: "host"})
+
+ for cluster in response["storagePoolBundle"]["cluster"]:
+
+ # Verify there is no ambiguity between target's type (ie host and group have the same name)
+ if cluster["name"] == self.target and self.target in target_name.keys():
+ self.module.fail_json(msg="Ambiguous target type: target name is used for both host and group targets! Id [%s]" % self.ssid)
+
+ target_reference.update({cluster["clusterRef"]: cluster["name"]})
+ target_name.update({cluster["name"]: cluster["clusterRef"]})
+ target_type.update({cluster["name"]: "group"})
+
+ target_reference.update({"0000000000000000000000000000000000000000": "DEFAULT_HOSTGROUP"})
+ target_name.update({"DEFAULT_HOSTGROUP": "0000000000000000000000000000000000000000"})
+ target_type.update({"DEFAULT_HOSTGROUP": "group"})
+
+ volume_reference = {}
+ volume_name = {}
+ lun_name = {}
+ for volume in response["volume"]:
+ volume_reference.update({volume["volumeRef"]: volume["name"]})
+ volume_name.update({volume["name"]: volume["volumeRef"]})
+ if volume["listOfMappings"]:
+ lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]})
+ for volume in response["highLevelVolBundle"]["thinVolume"]:
+ volume_reference.update({volume["volumeRef"]: volume["name"]})
+ volume_name.update({volume["name"]: volume["volumeRef"]})
+ if volume["listOfMappings"]:
+ lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]})
+
+ volume_name.update({response["sa"]["accessVolume"]["name"]: response["sa"]["accessVolume"]["accessVolumeRef"]})
+ volume_reference.update({response["sa"]["accessVolume"]["accessVolumeRef"]: response["sa"]["accessVolume"]["name"]})
+
+ # Build current mapping object
+ self.mapping_info = dict(lun_mapping=[dict(volume_reference=mapping["volumeRef"],
+ map_reference=mapping["mapRef"],
+ lun_mapping_reference=mapping["lunMappingRef"],
+ lun=mapping["lun"]
+ ) for mapping in response["storagePoolBundle"]["lunMapping"]],
+ volume_by_reference=volume_reference,
+ volume_by_name=volume_name,
+ lun_by_name=lun_name,
+ target_by_reference=target_reference,
+ target_by_name=target_name,
+ target_type_by_name=target_type)
+
+ def get_lun_mapping(self):
+ """Find the matching lun mapping reference.
+
+ Returns: tuple(bool, int, int): contains volume match, volume mapping reference and mapping lun
+ """
+ target_match = False
+ reference = None
+ lun = None
+
+ self.update_mapping_info()
+
+ # Verify that when a lun is specified that it does not match an existing lun value unless it is associated with
+ # the specified volume (ie for an update)
+ if self.lun and any((self.lun == lun_mapping["lun"] and
+ self.target == self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] and
+ self.volume != self.mapping_info["volume_by_reference"][lun_mapping["volume_reference"]]
+ ) for lun_mapping in self.mapping_info["lun_mapping"]):
+ self.module.fail_json(msg="Option lun value is already in use for target! Array Id [%s]." % self.ssid)
+
+ # Verify volume and target exist if needed for expected state.
+ if self.state:
+ if self.volume not in self.mapping_info["volume_by_name"].keys():
+ self.module.fail_json(msg="Volume does not exist. Id [%s]." % self.ssid)
+ if self.target and self.target not in self.mapping_info["target_by_name"].keys():
+ self.module.fail_json(msg="Target does not exist. Id [%s'." % self.ssid)
+
+ for lun_mapping in self.mapping_info["lun_mapping"]:
+
+ # Find matching volume reference
+ if lun_mapping["volume_reference"] == self.mapping_info["volume_by_name"][self.volume]:
+ reference = lun_mapping["lun_mapping_reference"]
+ lun = lun_mapping["lun"]
+
+ # Determine if lun mapping is attached to target with the
+ if (lun_mapping["map_reference"] in self.mapping_info["target_by_reference"].keys() and
+ self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] == self.target and
+ (self.lun is None or lun == self.lun)):
+ target_match = True
+
+ return target_match, reference, lun
+
+ def update(self):
+ """Execute the changes the require changes on the storage array."""
+ target_match, lun_reference, lun = self.get_lun_mapping()
+ update = (self.state and not target_match) or (not self.state and lun_reference)
+
+ if update and not self.check_mode:
+ try:
+ if self.state:
+ body = dict()
+ target = None if not self.target else self.mapping_info["target_by_name"][self.target]
+ if target:
+ body.update(dict(targetId=target))
+ if self.lun is not None:
+ body.update(dict(lun=self.lun))
+
+ if lun_reference:
+
+ rc, response = self.request("storage-systems/%s/volume-mappings/%s/move" % (self.ssid, lun_reference), method="POST", data=body)
+ else:
+ body.update(dict(mappableObjectId=self.mapping_info["volume_by_name"][self.volume]))
+ rc, response = self.request("storage-systems/%s/volume-mappings" % self.ssid, method="POST", data=body)
+
+ else: # Remove existing lun mapping for volume and target
+ rc, response = self.request("storage-systems/%s/volume-mappings/%s" % (self.ssid, lun_reference), method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update storage array lun mapping. Id [%s]. Error [%s]" % (self.ssid, to_native(error)))
+
+ self.module.exit_json(msg="Lun mapping is complete.", changed=update)
+
+
+def main():
+    """Module entry point: instantiate the lun mapping handler and apply updates."""
+    mapping = NetAppESeriesLunMapping()
+    mapping.update()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_mgmt_interface.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_mgmt_interface.py
new file mode 100644
index 00000000..6fd7c7f1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_mgmt_interface.py
@@ -0,0 +1,531 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_mgmt_interface
+short_description: NetApp E-Series manage management interface configuration
+description:
+ - Configure the E-Series management interfaces
+author:
+ - Michael Price (@lmprice)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - Enable or disable IPv4 network interface configuration.
+ - Either IPv4 or IPv6 must be enabled otherwise error will occur.
+ choices:
+ - enabled
+ - disabled
+ default: enabled
+ type: str
+ required: false
+ controller:
+ description:
+ - The controller that owns the port you want to configure.
+ - Controller names are represented alphabetically, with the first controller as A,
+ the second as B, and so on.
+ - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard
+ limitation and could change in the future.
+ choices:
+ - A
+ - B
+ type: str
+ required: true
+ port:
+ description:
+ - The ethernet port configuration to modify.
+ - The channel represents the port number left to right on the controller, beginning with 1.
+ type: int
+ required: true
+ address:
+ description:
+ - The IPv4 address to assign to the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: false
+ subnet_mask:
+ description:
+ - The subnet mask to utilize for the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: false
+ gateway:
+ description:
+ - The IPv4 gateway address to utilize for the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: false
+ config_method:
+ description:
+ - The configuration method type to use for network interface ports.
+ - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway).
+ choices:
+ - dhcp
+ - static
+ type: str
+ required: false
+ dns_config_method:
+ description:
+ - The configuration method type to use for DNS services.
+ - dhcp is mutually exclusive with I(dns_address), and I(dns_address_backup).
+ choices:
+ - dhcp
+ - static
+ type: str
+ required: false
+ dns_address:
+ description:
+ - Primary IPv4 DNS server address
+ type: str
+ required: false
+ dns_address_backup:
+ description:
+ - Backup IPv4 DNS server address
+ - Queried when primary DNS server fails
+ type: str
+ required: false
+ ntp_config_method:
+ description:
+ - The configuration method type to use for NTP services.
+ - disable is mutually exclusive with I(ntp_address) and I(ntp_address_backup).
+ - dhcp is mutually exclusive with I(ntp_address) and I(ntp_address_backup).
+ choices:
+ - disabled
+ - dhcp
+ - static
+ type: str
+ required: false
+ ntp_address:
+ description:
+ - Primary IPv4 NTP server address
+ type: str
+ required: false
+ ntp_address_backup:
+ description:
+ - Backup IPv4 NTP server address
+ - Queried when primary NTP server fails
+ type: str
+ required: false
+ ssh:
+ description:
+ - Enable ssh access to the controller for debug purposes.
+ - This is a controller-level setting.
+ - rlogin/telnet will be enabled for ancient equipment where ssh is not available.
+ type: bool
+ required: false
+notes:
+ - Check mode is supported.
+ - It is highly recommended to have a minimum of one up management port on each controller.
+ - When using SANtricity Web Services Proxy, use M(netapp_eseries.santricity.na_santricity_storage_system) to update management paths. This is required because of a known issue
+ and will be addressed in the proxy version 4.1. After the resolution the management ports should automatically be updated.
+ - The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address
+ via dhcp, etc), can take seconds or minutes longer to take effect.
+"""
+
+EXAMPLES = """
+ - name: Configure the first port on the A controller with a static IPv4 address
+ na_santricity_mgmt_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ port: "1"
+ controller: "A"
+ config_method: static
+ address: "192.168.1.100"
+ subnet_mask: "255.255.255.0"
+ gateway: "192.168.1.1"
+
+ - name: Disable ipv4 connectivity for the second port on the B controller
+ na_santricity_mgmt_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ port: "2"
+ controller: "B"
+ enable_interface: no
+
+ - name: Enable ssh access for ports one and two on controller A
+ na_santricity_mgmt_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ port: "1"
+ controller: "A"
+ ssh: yes
+
+ - name: Configure static DNS settings for the first port on controller A
+ na_santricity_mgmt_interface:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ port: "1"
+ controller: "A"
+ dns_config_method: static
+ dns_address: "192.168.1.100"
+ dns_address_backup: "192.168.1.1"
+
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The interface settings have been updated.
+available_embedded_api_urls:
+ description: List containing available web services embedded REST API urls
+ returned: on success
+ type: list
+ sample:
+"""
+from time import sleep
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+
+
+class NetAppESeriesMgmtInterface(NetAppESeriesModule):
+ MAXIMUM_VERIFICATION_TIMEOUT = 120
+
+    def __init__(self):
+        """Collect the module options and initialize management-interface working state."""
+        ansible_options = dict(state=dict(type="str", choices=["enabled", "disabled"], default="enabled", required=False),
+                               controller=dict(type="str", required=True, choices=["A", "B"]),
+                               # NOTE(review): DOCUMENTATION declares port as required, but
+                               # required=True is not set here — confirm which is intended.
+                               port=dict(type="int"),
+                               address=dict(type="str", required=False),
+                               subnet_mask=dict(type="str", required=False),
+                               gateway=dict(type="str", required=False),
+                               config_method=dict(type="str", required=False, choices=["dhcp", "static"]),
+                               dns_config_method=dict(type="str", required=False, choices=["dhcp", "static"]),
+                               dns_address=dict(type="str", required=False),
+                               dns_address_backup=dict(type="str", required=False),
+                               ntp_config_method=dict(type="str", required=False, choices=["disabled", "dhcp", "static"]),
+                               ntp_address=dict(type="str", required=False),
+                               ntp_address_backup=dict(type="str", required=False),
+                               ssh=dict(type="bool", required=False))
+
+        # NOTE(review): "enable" is not a valid state choice ("enabled"/"disabled"), so the first
+        # required_if rule can never trigger; fixing it would make config_method mandatory by
+        # default and break ssh-only invocations — confirm intended behavior before changing.
+        required_if = [["state", "enable", ["config_method"]],
+                       ["config_method", "static", ["address", "subnet_mask"]],
+                       ["dns_config_method", "static", ["dns_address"]],
+                       ["ntp_config_method", "static", ["ntp_address"]]]
+
+        super(NetAppESeriesMgmtInterface, self).__init__(ansible_options=ansible_options,
+                                                         web_services_version="02.00.0000.0000",
+                                                         required_if=required_if,
+                                                         supports_check_mode=True)
+
+        args = self.module.params
+        self.enable_interface = args["state"] == "enabled"
+        self.controller = args["controller"]
+        self.channel = args["port"]
+
+        # IPv4 interface settings
+        self.config_method = args["config_method"]
+        self.address = args["address"]
+        self.subnet_mask = args["subnet_mask"]
+        self.gateway = args["gateway"]
+
+        # DNS settings
+        self.dns_config_method = args["dns_config_method"]
+        self.dns_address = args["dns_address"]
+        self.dns_address_backup = args["dns_address_backup"]
+
+        # NTP settings
+        self.ntp_config_method = args["ntp_config_method"]
+        self.ntp_address = args["ntp_address"]
+        self.ntp_address_backup = args["ntp_address_backup"]
+
+        self.ssh = args["ssh"]
+
+        # Working state populated while applying changes.
+        self.body = {}
+        self.interface_info = {}
+        self.alt_interface_addresses = []
+        self.all_interface_addresses = []
+        self.use_alternate_address = False
+        self.alt_url_path = None
+
+        self.available_embedded_api_urls = []
+
+ def get_controllers(self):
+ """Retrieve a mapping of controller labels to their references
+ :return: controllers defined on the system. Example: {'A': '070000000000000000000001', 'B': '070000000000000000000002'}
+ """
+ try:
+ rc, controllers = self.request("storage-systems/%s/controllers" % self.ssid)
+ except Exception as err:
+ controllers = list()
+ self.module.fail_json(msg="Failed to retrieve the controller settings. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ controllers.sort(key=lambda c: c['physicalLocation']['slot'])
+ controllers_dict = dict()
+ i = ord('A')
+ for controller in controllers:
+ label = chr(i)
+ settings = dict(controllerSlot=controller['physicalLocation']['slot'],
+ controllerRef=controller['controllerRef'],
+ ssh=controller['networkSettings']['remoteAccessEnabled'])
+ controllers_dict[label] = settings
+ i += 1
+ return controllers_dict
+
+ def update_target_interface_info(self, retries=60):
+ """Discover and update cached interface info."""
+ net_interfaces = list()
+ try:
+ rc, net_interfaces = self.request("storage-systems/%s/configuration/ethernet-interfaces" % self.ssid)
+ except Exception as error:
+ if retries > 0:
+ self.update_target_interface_info(retries=retries - 1)
+ return
+ else:
+ self.module.fail_json(msg="Failed to retrieve defined management interfaces. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ iface = None
+ channels = {}
+ controller_info = self.get_controllers()[self.controller]
+ controller_ref = controller_info["controllerRef"]
+ controller_ssh = controller_info["ssh"]
+ for net in net_interfaces:
+ if net["controllerRef"] == controller_ref:
+ channels.update({net["channel"]: net["linkStatus"]})
+ if net["ipv4Enabled"] and net["linkStatus"] == "up":
+ self.all_interface_addresses.append(net["ipv4Address"])
+ if net["controllerRef"] == controller_ref and net["channel"] == self.channel:
+ iface = net
+ elif net["ipv4Enabled"] and net["linkStatus"] == "up":
+ self.alt_interface_addresses.append(net["ipv4Address"])
+
+ if iface is None:
+ available_controllers = ["%s (%s)" % (channel, status) for channel, status in channels.items()]
+ self.module.fail_json(msg="Invalid port number! Controller %s ports: [%s]. Array [%s]"
+ % (self.controller, ",".join(available_controllers), self.ssid))
+
+ self.interface_info.update({"channel": iface["channel"],
+ "link_status": iface["linkStatus"],
+ "enabled": iface["ipv4Enabled"],
+ "address": iface["ipv4Address"],
+ "gateway": iface["ipv4GatewayAddress"],
+ "subnet_mask": iface["ipv4SubnetMask"],
+ "dns_config_method": iface["dnsProperties"]["acquisitionProperties"]["dnsAcquisitionType"],
+ "dns_servers": iface["dnsProperties"]["acquisitionProperties"]["dnsServers"],
+ "ntp_config_method": iface["ntpProperties"]["acquisitionProperties"]["ntpAcquisitionType"],
+ "ntp_servers": iface["ntpProperties"]["acquisitionProperties"]["ntpServers"],
+ "config_method": iface["ipv4AddressConfigMethod"],
+ "controllerRef": iface["controllerRef"],
+ "controllerSlot": iface["controllerSlot"],
+ "ipv6_enabled": iface["ipv6Enabled"],
+ "id": iface["interfaceRef"],
+ "ssh": controller_ssh})
+
+ def update_body_enable_interface_setting(self):
+ """Enable or disable the IPv4 network interface."""
+ change_required = False
+ if not self.enable_interface and not self.interface_info["ipv6_enabled"]:
+ self.module.fail_json(msg="Either IPv4 or IPv6 must be enabled. Array [%s]." % self.ssid)
+
+ if self.enable_interface != self.interface_info["enabled"]:
+ change_required = True
+ self.body.update({"ipv4Enabled": self.enable_interface})
+ return change_required
+
+ def update_body_interface_settings(self):
+ """Update network interface settings."""
+ change_required = False
+ if self.config_method == "dhcp":
+ if self.interface_info["config_method"] != "configDhcp":
+ if self.interface_info["address"] in self.url:
+ self.use_alternate_address = True
+ change_required = True
+ self.body.update({"ipv4AddressConfigMethod": "configDhcp"})
+ else:
+ self.body.update({"ipv4AddressConfigMethod": "configStatic", "ipv4Address": self.address, "ipv4SubnetMask": self.subnet_mask})
+ if self.interface_info["config_method"] != "configStatic":
+ change_required = True
+ if self.address and self.interface_info["address"] != self.address:
+ if self.interface_info["address"] in self.url:
+ self.use_alternate_address = True
+ change_required = True
+ if self.subnet_mask and self.interface_info["subnet_mask"] != self.subnet_mask:
+ change_required = True
+ if self.gateway and self.interface_info["gateway"] != self.gateway:
+ self.body.update({"ipv4GatewayAddress": self.gateway})
+ change_required = True
+
+ return change_required
+
+ def update_body_dns_server_settings(self):
+ """Add DNS server information to the request body."""
+ change_required = False
+ if self.dns_config_method == "dhcp":
+ if self.interface_info["dns_config_method"] != "dhcp":
+ change_required = True
+ self.body.update({"dnsAcquisitionDescriptor": {"dnsAcquisitionType": "dhcp"}})
+
+ elif self.dns_config_method == "static":
+ dns_servers = [dict(addressType="ipv4", ipv4Address=self.dns_address)]
+ if self.dns_address_backup:
+ dns_servers.append(dict(addressType="ipv4", ipv4Address=self.dns_address_backup))
+
+ if (self.interface_info["dns_config_method"] != "stat" or
+ len(self.interface_info["dns_servers"]) != len(dns_servers) or
+ (len(self.interface_info["dns_servers"]) == 2 and
+ (self.interface_info["dns_servers"][0]["ipv4Address"] != self.dns_address or
+ self.interface_info["dns_servers"][1]["ipv4Address"] != self.dns_address_backup)) or
+ (len(self.interface_info["dns_servers"]) == 1 and
+ self.interface_info["dns_servers"][0]["ipv4Address"] != self.dns_address)):
+ change_required = True
+ self.body.update({"dnsAcquisitionDescriptor": {"dnsAcquisitionType": "stat", "dnsServers": dns_servers}})
+ return change_required
+
+ def update_body_ntp_server_settings(self):
+ """Add NTP server information to the request body."""
+ change_required = False
+ if self.ntp_config_method == "disabled":
+ if self.interface_info["ntp_config_method"] != "disabled":
+ change_required = True
+ self.body.update({"ntpAcquisitionDescriptor": {"ntpAcquisitionType": "disabled"}})
+
+ elif self.ntp_config_method == "dhcp":
+ if self.interface_info["ntp_config_method"] != "dhcp":
+ change_required = True
+ self.body.update({"ntpAcquisitionDescriptor": {"ntpAcquisitionType": "dhcp"}})
+
+ elif self.ntp_config_method == "static":
+ ntp_servers = [{"addrType": "ipvx", "ipvxAddress": {"addressType": "ipv4", "ipv4Address": self.ntp_address}}]
+ if self.ntp_address_backup:
+ ntp_servers.append({"addrType": "ipvx", "ipvxAddress": {"addressType": "ipv4", "ipv4Address": self.ntp_address_backup}})
+
+ if (self.interface_info["ntp_config_method"] != "stat" or
+ len(self.interface_info["ntp_servers"]) != len(ntp_servers) or
+ ((len(self.interface_info["ntp_servers"]) == 2 and
+ (self.interface_info["ntp_servers"][0]["ipvxAddress"]["ipv4Address"] != self.ntp_address or
+ self.interface_info["ntp_servers"][1]["ipvxAddress"]["ipv4Address"] != self.ntp_address_backup)) or
+ (len(self.interface_info["ntp_servers"]) == 1 and
+ ((self.interface_info["ntp_servers"][0]["addrType"] == "ipvx" and
+ self.interface_info["ntp_servers"][0]["ipvxAddress"]["ipv4Address"] != self.ntp_address) or
+ (self.interface_info["ntp_servers"][0]["addrType"] == "domainName" and
+ self.interface_info["ntp_servers"][0]["domainName"] != self.ntp_address))))):
+ change_required = True
+ self.body.update({"ntpAcquisitionDescriptor": {"ntpAcquisitionType": "stat", "ntpServers": ntp_servers}})
+ return change_required
+
+ def update_body_ssh_setting(self):
+ """Configure network interface ports for remote ssh access."""
+ change_required = False
+ if self.interface_info["ssh"] != self.ssh:
+ change_required = True
+ self.body.update({"enableRemoteAccess": self.ssh})
+ return change_required
+
+ def update_request_body(self):
+ """Verify all required changes have been made."""
+ self.update_target_interface_info()
+ self.body = {"controllerRef": self.get_controllers()[self.controller]["controllerRef"], "interfaceRef": self.interface_info["id"]}
+
+ change_required = False
+ if self.enable_interface is not None:
+ change_required = self.update_body_enable_interface_setting()
+ if self.config_method is not None:
+ change_required = self.update_body_interface_settings() or change_required
+ if self.dns_config_method is not None:
+ change_required = self.update_body_dns_server_settings() or change_required
+ if self.ntp_config_method is not None:
+ change_required = self.update_body_ntp_server_settings() or change_required
+ if self.ssh is not None:
+ change_required = self.update_body_ssh_setting() or change_required
+
+ self.module.log("update_request_body change_required: %s" % change_required)
+ return change_required
+
+ def update_url(self, retries=60):
+ """Update eseries base class url if on is available."""
+ for address in self.alt_interface_addresses:
+ if address not in self.url and address != "0.0.0.0":
+ parsed_url = urlparse.urlparse(self.url)
+ location = parsed_url.netloc.split(":")
+ location[0] = address
+ self.url = "%s://%s/" % (parsed_url.scheme, ":".join(location))
+ self.available_embedded_api_urls = ["%s://%s/%s" % (parsed_url.scheme, ":".join(location), self.DEFAULT_REST_API_PATH)]
+ self.module.warn("Using alternate address [%s]" % self.available_embedded_api_urls[0])
+ break
+ else:
+ if retries > 0:
+ sleep(1)
+ self.update_target_interface_info()
+ self.update_url(retries=retries - 1)
+ else:
+ self.module.warn("Unable to obtain an alternate url!")
+
+ def update(self):
+ """Update controller with new interface, dns service, ntp service and/or remote ssh access information."""
+ change_required = self.update_request_body()
+
+ # Build list of available web services rest api urls
+ self.available_embedded_api_urls = []
+ parsed_url = urlparse.urlparse(self.url)
+ location = parsed_url.netloc.split(":")
+ for address in self.all_interface_addresses:
+ location[0] = address
+ self.available_embedded_api_urls = ["%s://%s/%s" % (parsed_url.scheme, ":".join(location), self.DEFAULT_REST_API_PATH)]
+
+ if change_required and not self.module.check_mode:
+
+ # Update url if currently used interface will be modified
+ if self.is_embedded():
+ if self.use_alternate_address:
+ self.update_url()
+ if self.address:
+ parsed_url = urlparse.urlparse(self.url)
+ location = parsed_url.netloc.split(":")
+ location[0] = self.address
+ self.available_embedded_api_urls.append("%s://%s/%s" % (parsed_url.scheme, ":".join(location), self.DEFAULT_REST_API_PATH))
+ else:
+ self.available_embedded_api_urls = ["%s/%s" % (self.url, self.DEFAULT_REST_API_PATH)]
+
+ # Update management interface
+ try:
+ rc, response = self.request("storage-systems/%s/configuration/ethernet-interfaces" % self.ssid, method="POST", data=self.body)
+ except Exception as error:
+ pass
+
+ # Validate all changes have been made
+ for retries in range(self.MAXIMUM_VERIFICATION_TIMEOUT):
+ if not self.update_request_body():
+ break
+ sleep(1)
+ else:
+ self.module.warn("Changes failed to complete! Timeout waiting for management interface to update. Array [%s]." % self.ssid)
+ self.module.exit_json(msg="The interface settings have been updated.", changed=change_required,
+ available_embedded_api_urls=self.available_embedded_api_urls)
+ self.module.exit_json(msg="No changes are required.", changed=change_required,
+ available_embedded_api_urls=self.available_embedded_api_urls if self.is_embedded() else [])
+
+
+def main():
+ interface = NetAppESeriesMgmtInterface()
+ interface.update()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_nvme_interface.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_nvme_interface.py
new file mode 100644
index 00000000..3edf725a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_nvme_interface.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_nvme_interface
+short_description: NetApp E-Series manage NVMe interface configuration
+description: Configure settings of an E-Series NVMe interface
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ address:
+ description:
+ - The IPv4 address to assign to the NVMe interface
+ type: str
+ required: false
+ subnet_mask:
+ description:
+ - The subnet mask to utilize for the interface.
+ - Only applicable when configuring RoCE
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: false
+ gateway:
+ description:
+ - The IPv4 gateway address to utilize for the interface.
+ - Only applicable when configuring RoCE
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: false
+ config_method:
+ description:
+ - The configuration method type to use for this interface.
+ - Only applicable when configuring RoCE
+ - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway).
+ type: str
+ choices:
+ - dhcp
+ - static
+ required: false
+ default: dhcp
+ mtu:
+ description:
+ - The maximum transmission units (MTU), in bytes.
+ - Only applicable when configuring RoCE
+ - This allows you to configure a larger value for the MTU, in order to enable jumbo frames
+ (any value > 1500).
+ - Generally, it is necessary to have your host, switches, and other components not only support jumbo
+ frames, but also have it configured properly. Therefore, unless you know what you're doing, it's best to
+ leave this at the default.
+ type: int
+ default: 1500
+ required: false
+ aliases:
+ - max_frame_size
+ speed:
+ description:
+ - This is the ethernet port speed measured in Gb/s.
+ - Value must be a supported speed or auto for automatically negotiating the speed with the port.
+ - Only applicable when configuring RoCE
+ - The configured ethernet port speed should match the speed capability of the SFP on the selected port.
+ type: str
+ required: false
+ default: auto
+ state:
+ description:
+ - Whether or not the specified RoCE interface should be enabled.
+ - Only applicable when configuring RoCE
+ choices:
+ - enabled
+ - disabled
+ type: str
+ required: false
+ default: enabled
+ channel:
+ description:
+      - This option specifies which NVMe controller channel to configure.
+ - The list of choices is not necessarily comprehensive. It depends on the number of ports
+ that are available in the system.
+ - The numerical value represents the number of the channel (typically from left to right on the HIC),
+ beginning with a value of 1.
+ type: int
+ required: false
+ controller:
+ description:
+ - The controller that owns the port you want to configure.
+ - Controller names are presented alphabetically, with the first controller as A and the second as B.
+ type: str
+ required: false
+ choices: [A, B]
+"""
+EXAMPLES = """
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The interface settings have been updated.
+"""
+import re
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesNvmeInterface(NetAppESeriesModule):
+    """Configure an E-Series NVMe-oF host-side interface (InfiniBand or RoCE v2)."""
+
+    def __init__(self):
+        # NOTE(review): DOCUMENTATION marks channel/controller as "required: false",
+        # but both are required=True here — the docs appear out of date.
+        ansible_options = dict(address=dict(type="str", required=False),
+                               subnet_mask=dict(type="str", required=False),
+                               gateway=dict(type="str", required=False),
+                               config_method=dict(type="str", required=False, default="dhcp", choices=["dhcp", "static"]),
+                               mtu=dict(type="int", default=1500, required=False, aliases=["max_frame_size"]),
+                               speed=dict(type="str", default="auto", required=False),
+                               state=dict(type="str", default="enabled", required=False, choices=["enabled", "disabled"]),
+                               channel=dict(type="int", required=True),
+                               controller=dict(type="str", required=True, choices=["A", "B"]))
+
+        required_if = [["config_method", "static", ["address", "subnet_mask"]]]
+        super(NetAppESeriesNvmeInterface, self).__init__(ansible_options=ansible_options,
+                                                         web_services_version="02.00.0000.0000",
+                                                         required_if=required_if,
+                                                         supports_check_mode=True)
+
+        args = self.module.params
+        self.address = args["address"]
+        self.subnet_mask = args["subnet_mask"]
+        self.gateway = args["gateway"]
+        # Translate the user-facing option value into the API's config-method token.
+        self.config_method = "configDhcp" if args["config_method"] == "dhcp" else "configStatic"
+        self.mtu = args["mtu"]
+        self.speed = args["speed"]
+        self.enabled = args["state"] == "enabled"
+        self.channel = args["channel"]
+        self.controller = args["controller"]
+
+        # Loose dotted-quad check: it accepts octets up to 999 (e.g. 999.1.1.1);
+        # real range validation is left to the array.
+        address_regex = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
+        if self.address and not address_regex.match(self.address):
+            self.module.fail_json(msg="An invalid ip address was provided for address.")
+        if self.subnet_mask and not address_regex.match(self.subnet_mask):
+            self.module.fail_json(msg="An invalid ip address was provided for subnet_mask.")
+        if self.gateway and not address_regex.match(self.gateway):
+            self.module.fail_json(msg="An invalid ip address was provided for gateway.")
+
+        # Memoized result of get_target_interface().
+        self.get_target_interface_cache = None
+
+    def get_nvmeof_interfaces(self):
+        """Retrieve all interfaces that are using nvmeof"""
+        ifaces = list()
+        try:
+            rc, ifaces = self.request("storage-systems/%s/interfaces?channelType=hostside" % self.ssid)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve defined host interfaces. Array Id [%s]. Error [%s]."
+                                      % (self.ssid, to_native(error)))
+
+        # Filter out all not nvme-nvmeof hostside interfaces.
+        nvmeof_ifaces = []
+        for iface in ifaces:
+            interface_type = iface["ioInterfaceTypeData"]["interfaceType"]
+            properties = iface["commandProtocolPropertiesList"]["commandProtocolProperties"]
+
+            # Link state lives in a different sub-structure for InfiniBand vs ethernet;
+            # fall back to the ethernet location when the ib lookup fails.
+            try:
+                link_status = iface["ioInterfaceTypeData"]["ib"]["linkState"]
+            except Exception as error:
+                link_status = iface["ioInterfaceTypeData"]["ethernet"]["interfaceData"]["ethernetData"]["linkStatus"]
+
+            if (properties and properties[0]["commandProtocol"] == "nvme" and
+                    properties[0]["nvmeProperties"]["commandSet"] == "nvmeof"):
+                nvmeof_ifaces.append({"properties": properties[0]["nvmeProperties"]["nvmeofProperties"],
+                                      "reference": iface["interfaceRef"],
+                                      "channel": iface["ioInterfaceTypeData"][iface["ioInterfaceTypeData"]["interfaceType"]]["channel"],
+                                      "interface_type": interface_type,
+                                      "interface": iface["ioInterfaceTypeData"][interface_type],
+                                      "controller_id": iface["controllerRef"],
+                                      "link_status": link_status})
+        return nvmeof_ifaces
+
+    def get_controllers(self):
+        """Retrieve a mapping of controller labels to their references"""
+        controllers = list()
+        try:
+            rc, controllers = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/id" % self.ssid)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
+                                      % (self.ssid, to_native(error)))
+
+        # Sorted controller ids are labeled alphabetically: first -> "A", second -> "B".
+        controllers.sort()
+        controllers_dict = {}
+        i = ord("A")
+        for controller in controllers:
+            label = chr(i)
+            controllers_dict[label] = controller
+            i += 1
+
+        return controllers_dict
+
+    def get_target_interface(self):
+        """Retrieve the targeted controller interface"""
+        # Memoized: only queried from the array on first call.
+        if self.get_target_interface_cache is None:
+            ifaces = self.get_nvmeof_interfaces()
+            controller_id = self.get_controllers()[self.controller]
+
+            controller_ifaces = []
+            for iface in ifaces:
+                if iface["controller_id"] == controller_id:
+                    controller_ifaces.append(iface)
+
+            # channel is a 1-based index into the controller's channel-sorted interfaces.
+            sorted_controller_ifaces = sorted(controller_ifaces, key=lambda x: x["channel"])
+            if self.channel < 1 or self.channel > len(controller_ifaces):
+                status_msg = ", ".join(["%s (link %s)" % (index + 1, iface["link_status"])
+                                        for index, iface in enumerate(sorted_controller_ifaces)])
+                self.module.fail_json(msg="Invalid controller %s NVMe channel. Available channels: %s, Array Id [%s]."
+                                          % (self.controller, status_msg, self.ssid))
+
+            self.get_target_interface_cache = sorted_controller_ifaces[self.channel - 1]
+
+        return self.get_target_interface_cache
+
+    def update(self):
+        """Update the storage system's controller nvme interface if needed."""
+        update_required = False
+        body = {}
+
+        iface = self.get_target_interface()
+        if iface["properties"]["provider"] == "providerInfiniband":
+            # InfiniBand: only the IPv4 address is configurable here.
+            # NOTE(review): unlike the RoCE branch, this body has no "id" field —
+            # confirm the POST below does not require it for ib interfaces.
+            if (iface["properties"]["ibProperties"]["ipAddressData"]["addressType"] != "ipv4" or
+                    iface["properties"]["ibProperties"]["ipAddressData"]["ipv4Data"]["ipv4Address"] != self.address):
+                update_required = True
+                body = {"settings": {"ibSettings": {"networkSettings": {"ipv4Address": self.address}}}}
+
+        elif iface["properties"]["provider"] == "providerRocev2":
+            # RoCE v2: normalize the API speed tokens ("speed25gig" -> "25",
+            # "speedAuto" -> "auto") so they compare against the user option.
+            interface_data = iface["interface"]["interfaceData"]["ethernetData"]
+            current_speed = interface_data["currentInterfaceSpeed"].lower().replace("speed", "").replace("gig", "")
+            interface_supported_speeds = [str(speed).lower().replace("speed", "").replace("gig", "")
+                                          for speed in interface_data["supportedInterfaceSpeeds"]]
+            if self.speed not in interface_supported_speeds:
+                self.module.fail_json(msg="Unsupported interface speed! Options %s. Array [%s]."
+                                          % (interface_supported_speeds, self.ssid))
+
+            roce_properties = iface["properties"]["roceV2Properties"]
+            if self.enabled != roce_properties["ipv4Enabled"]:
+                update_required = True
+            if self.address and roce_properties["ipv4Data"]["ipv4AddressConfigMethod"] != self.config_method:
+                update_required = True
+            if self.address and roce_properties["ipv4Data"]["ipv4AddressData"]["ipv4Address"] != self.address:
+                update_required = True
+            if self.subnet_mask and roce_properties["ipv4Data"]["ipv4AddressData"]["ipv4SubnetMask"] != self.subnet_mask:
+                update_required = True
+            if self.gateway and roce_properties["ipv4Data"]["ipv4AddressData"]["ipv4GatewayAddress"] != self.gateway:
+                update_required = True
+            if self.speed and self.speed != current_speed:
+                update_required = True
+            if (self.mtu and iface["interface"]["interfaceData"]["ethernetData"][
+                    "maximumFramePayloadSize"] != self.mtu):
+                update_required = True
+
+            if update_required:
+                body = {"id": iface["reference"], "settings": {"roceV2Settings": {
+                    "networkSettings": {"ipv4Enabled": self.enabled,
+                                        "ipv4Settings": {"configurationMethod": self.config_method}}}}}
+
+                # Static address details only apply when not using DHCP.
+                if self.config_method == "configStatic":
+                    if self.address:
+                        body["settings"]["roceV2Settings"]["networkSettings"]["ipv4Settings"].update(
+                            {"address": self.address})
+                    if self.subnet_mask:
+                        body["settings"]["roceV2Settings"]["networkSettings"]["ipv4Settings"].update(
+                            {"subnetMask": self.subnet_mask})
+                    if self.gateway:
+                        body["settings"]["roceV2Settings"]["networkSettings"]["ipv4Settings"].update(
+                            {"gatewayAddress": self.gateway})
+                if self.speed:
+                    if self.speed == "auto":
+                        body["settings"]["roceV2Settings"]["networkSettings"].update({"interfaceSpeed": "speedAuto"})
+                    else:
+                        body["settings"]["roceV2Settings"]["networkSettings"].update(
+                            {"interfaceSpeed": "speed%sgig" % self.speed})
+                if self.mtu:
+                    body["settings"]["roceV2Settings"]["networkSettings"].update({"interfaceMtu": self.mtu})
+
+        if update_required and not self.module.check_mode:
+            try:
+                rc, iface = self.request("storage-systems/%s/nvmeof/interfaces/%s" % (self.ssid, iface["reference"]),
+                                         method="POST", data=body)
+            except Exception as error:
+                self.module.fail_json(msg="Failed to configure interface. Array Id [%s]. Error [%s]."
+                                          % (self.ssid, to_native(error)))
+
+            self.module.exit_json(msg="NVMeoF interface settings have been updated.", changed=update_required)
+        self.module.exit_json(msg="No changes have been made.", changed=update_required)
+
+
+def main():
+ nvmeof_interface = NetAppESeriesNvmeInterface()
+ nvmeof_interface.update()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_drive_firmware_upload.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_drive_firmware_upload.py
new file mode 100644
index 00000000..715467e1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_drive_firmware_upload.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_proxy_drive_firmware_upload
+short_description: NetApp E-Series manage proxy drive firmware files
+description:
+ - Ensure drive firmware files are available on SANtricity Web Service Proxy.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_proxy_doc
+options:
+ firmware:
+ description:
+ - This option can be a list of file paths and/or directories containing drive firmware.
+ - Note that only files with the extension .dlp will be attempted to be added to the proxy; all other files will be ignored.
+ - NetApp E-Series drives require special firmware which can be downloaded from https://mysupport.netapp.com/NOW/download/tools/diskfw_eseries/
+ type: list
+ required: false
+"""
+EXAMPLES = """
+- name: Ensure correct firmware versions
+ na_santricity_proxy_drive_firmware_upload:
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ firmware:
+ - "path/to/drive_firmware_file1.dlp"
+ - "path/to/drive_firmware_file2.dlp"
+ - "path/to/drive_firmware_directory"
+"""
+RETURN = """
+msg:
+ description: Whether any changes have been made to the collection of drive firmware on SANtricity Web Services Proxy.
+ type: str
+ returned: always
+"""
+import os
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata, request
+
+
+class NetAppESeriesProxyDriveFirmwareUpload(NetAppESeriesModule):
+    """Synchronize a set of local drive firmware files onto a Web Services Proxy.
+
+    Files present on the proxy but not listed locally are deleted; files listed
+    locally but missing from the proxy are uploaded.
+    """
+
+    # NOTE(review): appears unused within this module — confirm before relying on it.
+    WAIT_TIMEOUT_SEC = 60 * 15
+
+    def __init__(self):
+        ansible_options = dict(firmware=dict(type="list", required=False))
+
+        super(NetAppESeriesProxyDriveFirmwareUpload, self).__init__(ansible_options=ansible_options,
+                                                                    web_services_version="02.00.0000.0000",
+                                                                    supports_check_mode=True,
+                                                                    proxy_specific_task=True)
+        args = self.module.params
+        self.firmware = args["firmware"]
+
+        # Mapping of firmware file name -> local path, built by determine_file_paths().
+        self.files = None
+        self.add_files = []
+        self.remove_files = []
+        # NOTE(review): failures are collected here but never reported in exit_json.
+        self.upload_failures = []
+
+    def determine_file_paths(self):
+        """Determine all the drive firmware file paths."""
+        self.files = {}
+        if self.firmware:
+            for path in self.firmware:
+
+                if not os.path.exists(path):
+                    self.module.fail_json(msg="Drive firmware file does not exist! File [%s]" % path)
+                elif os.path.isdir(path):
+                    if not path.endswith("/"):
+                        path = path + "/"
+                    # NOTE(review): substring test matches ".dlp" anywhere in the name,
+                    # not only as the extension the documentation promises; endswith(".dlp")
+                    # would be the stricter check.
+                    for dir_filename in os.listdir(path):
+                        if ".dlp" in dir_filename:
+                            self.files.update({dir_filename: path + dir_filename})
+                elif ".dlp" in path:
+                    name = os.path.basename(path)
+                    self.files.update({name: path})
+
+        # NOTE(review): looks like leftover debug output — this warns the user with
+        # the full file mapping on every run.
+        self.module.warn("%s" % self.files)
+
+    def determine_changes(self):
+        """Determine whether drive firmware files should be uploaded to the proxy."""
+        try:
+            rc, results = self.request("files/drive")
+            current_files = [result["fileName"] for result in results]
+
+            # Proxy files not listed locally are scheduled for deletion...
+            for current_file in current_files:
+                if current_file not in self.files.keys():
+                    self.remove_files.append(current_file)
+
+            # ...and local files missing from the proxy are scheduled for upload.
+            for expected_file in self.files.keys():
+                if expected_file not in current_files:
+                    self.add_files.append(expected_file)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve proxy drive firmware file list. Error [%s]" % error)
+
+    def upload_files(self):
+        """Add drive firmware file to the proxy."""
+        for filename in self.add_files:
+            firmware_name = os.path.basename(filename)
+            files = [("file", firmware_name, self.files[filename])]
+            headers, data = create_multipart_formdata(files)
+            try:
+                # NOTE(review): leading slash is inconsistent with the "files/drive"
+                # path used elsewhere in this class — confirm both resolve identically.
+                rc, response = self.request("/files/drive", method="POST", headers=headers, data=data)
+            except Exception as error:
+                # Best-effort: record the failure and keep uploading the rest.
+                self.upload_failures.append(filename)
+                self.module.warn("Failed to upload drive firmware file. File [%s]." % firmware_name)
+
+    def delete_files(self):
+        """Remove drive firmware file to the proxy."""
+        for filename in self.remove_files:
+            try:
+                rc, response = self.request("files/drive/%s" % filename, method="DELETE")
+            except Exception as error:
+                # Best-effort: record the failure and keep deleting the rest.
+                self.upload_failures.append(filename)
+                self.module.warn("Failed to delete drive firmware file. File [%s]" % filename)
+
+    def apply(self):
+        """Apply state to the web services proxy."""
+        change_required = False
+        if not self.is_proxy():
+            self.module.fail_json(msg="Module can only be executed against SANtricity Web Services Proxy.")
+
+        self.determine_file_paths()
+        self.determine_changes()
+
+        if self.add_files or self.remove_files:
+            change_required = True
+
+        if change_required and not self.module.check_mode:
+            self.upload_files()
+            self.delete_files()
+
+        self.module.exit_json(changed=change_required, files_added=self.add_files, files_removed=self.remove_files)
+
+
+def main():
+ proxy_firmware_upload = NetAppESeriesProxyDriveFirmwareUpload()
+ proxy_firmware_upload.apply()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_firmware_upload.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_firmware_upload.py
new file mode 100644
index 00000000..68183fd5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_firmware_upload.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_proxy_firmware_upload
+short_description: NetApp E-Series manage proxy firmware uploads.
+description:
+ - Ensure specific firmware versions are available on SANtricity Web Services Proxy.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_proxy_doc
+options:
+ firmware:
+ description:
+ - List of paths and/or directories containing firmware/NVSRAM files.
+ - All firmware/NVSRAM files that are not specified will be removed from the proxy if they exist.
+ type: list
+ required: false
+"""
+EXAMPLES = """
+- name: Ensure proxy has the expected firmware versions.
+ na_santricity_proxy_firmware_upload:
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ firmware:
+ - "path/to/firmware/dlp_files"
+ - "path/to/nvsram.dlp"
+ - "path/to/firmware.dlp"
+"""
+RETURN = """
+msg:
+ description: Status and version of firmware and NVSRAM.
+ type: str
+ returned: always
+ sample:
+"""
+import os
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata, request
+
+
class NetAppESeriesProxyFirmwareUpload(NetAppESeriesModule):
    def __init__(self):
        """Collect module arguments and initialize file bookkeeping state."""
        ansible_options = dict(firmware=dict(type="list", required=False))
        super(NetAppESeriesProxyFirmwareUpload, self).__init__(ansible_options=ansible_options,
                                                               web_services_version="02.00.0000.0000",
                                                               supports_check_mode=True,
                                                               proxy_specific_task=True)

        args = self.module.params
        self.firmware = args["firmware"]    # list of file paths and/or directories, or None
        self.files = None                   # dict: firmware filename -> full local path (filled by determine_file_paths)
        self.add_files = []                 # filenames that must be uploaded to the proxy
        self.remove_files = []              # proxy filenames that must be deleted
        self.upload_failures = []           # filenames whose upload or delete failed
+
def determine_file_paths(self):
    """Resolve the firmware option into a mapping of filename -> local path.

    Directory entries are expanded (non-recursively) to their contained
    .dlp files; explicit file paths are added directly.  The module fails
    when a supplied path does not exist.
    """
    self.files = {}
    if self.firmware:
        for path in self.firmware:
            if not os.path.exists(path):
                self.module.fail_json(msg="Drive firmware file does not exist! File [%s]" % path)
            elif os.path.isdir(path):
                base = path if path.endswith("/") else path + "/"
                for entry in os.listdir(base):
                    # NOTE(review): substring match, so names like "x.dlp.bak" also qualify -- confirm intended.
                    if ".dlp" in entry:
                        self.files[entry] = base + entry
            elif ".dlp" in path:
                self.files[os.path.basename(path)] = path
+
def determine_changes(self):
    """Determine whether files need to be added to or removed from the proxy.

    Compares the proxy's current firmware file listing against self.files,
    populating self.add_files and self.remove_files.  Fails the module when
    the listing cannot be retrieved.
    """
    try:
        rc, results = self.request("firmware/cfw-files")
        current_files = [result["filename"] for result in results]

        # Any file on the proxy that is not in the expected set gets removed.
        for current_file in current_files:
            if current_file not in self.files.keys():
                self.remove_files.append(current_file)

        # Any expected file not yet on the proxy gets uploaded.
        for expected_file in self.files.keys():
            if expected_file not in current_files:
                self.add_files.append(expected_file)
    except Exception as error:
        # Include the underlying cause instead of silently discarding it.
        self.module.fail_json(msg="Failed to retrieve current firmware file listing. Error [%s]." % error)
+
def upload_files(self):
    """Upload firmware and NVSRAM files to the proxy.

    Each file is posted as multipart form data with server-side validation
    enabled.  Failures are recorded in self.upload_failures and reported as
    warnings so the remaining files are still attempted.
    """
    for filename in self.add_files:
        fields = [("validate", "true")]
        files = [("firmwareFile", filename, self.files[filename])]
        headers, data = create_multipart_formdata(files=files, fields=fields)
        try:
            rc, response = self.request("firmware/upload/", method="POST", data=data, headers=headers)
        except Exception as error:
            self.upload_failures.append(filename)
            # BUGFIX: AnsibleModule.warn() takes the warning text positionally;
            # warn(msg=...) raised TypeError instead of emitting the warning.
            self.module.warn("Failed to upload firmware file. File [%s]" % filename)
+
def delete_files(self):
    """Remove firmware and NVSRAM files from the proxy.

    Deletion failures are collected in self.upload_failures and surfaced
    as warnings rather than aborting the run.
    """
    for filename in self.remove_files:
        try:
            rc, response = self.request("firmware/upload/%s" % filename, method="DELETE")
        except Exception as error:
            self.upload_failures.append(filename)
            # BUGFIX: AnsibleModule.warn() takes the warning text positionally;
            # warn(msg=...) raised TypeError instead of emitting the warning.
            self.module.warn("Failed to delete firmware file. File [%s]" % filename)
+
def apply(self):
    """Apply the expected firmware file set to the web services proxy.

    Computes the required add/remove operations, performs them unless in
    check mode, and fails when any upload or delete did not succeed.
    """
    change_required = False
    if not self.is_proxy():
        self.module.fail_json(msg="Module can only be executed against SANtricity Web Services Proxy.")

    self.determine_file_paths()
    self.determine_changes()
    if self.add_files or self.remove_files:
        change_required = True

    if change_required and not self.module.check_mode:
        self.upload_files()
        self.delete_files()

    # Any recorded failure makes the task fail, but only after all files were attempted.
    if self.upload_failures:
        self.module.fail_json(msg="Some file failed to be uploaded! changed=%s, Files_added [%s]. Files_removed [%s]. Upload_failures [%s]"
                                  % (change_required, self.add_files, self.remove_files, self.upload_failures))
    self.module.exit_json(changed=change_required, files_added=self.add_files, files_removed=self.remove_files)
+
+
def main():
    """Module entry point: build the module object and enforce the desired state."""
    NetAppESeriesProxyFirmwareUpload().apply()


if __name__ == "__main__":
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_systems.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_systems.py
new file mode 100644
index 00000000..68101a3f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_systems.py
@@ -0,0 +1,579 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_proxy_systems
+short_description: NetApp E-Series manage SANtricity web services proxy storage arrays
+description:
+ - Manage the arrays accessible via a NetApp Web Services Proxy for NetApp E-series storage arrays.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_proxy_doc
+options:
+ add_discovered_systems:
+ description:
+ - This flag will force all discovered storage systems to be added to SANtricity Web Services Proxy.
+ type: bool
+ required: false
+ default: false
+ systems:
+ description:
+ - List of storage system information which defines which systems should be added on SANtricity Web Services Proxy.
+ - Accepts a simple serial number list or list of dictionary containing at minimum the serial or addresses key from the sub-option list.
+ - Note that the serial number will be used as the storage system identifier when an identifier is not specified.
      - When I(add_discovered_systems == False), any discovered storage system whose serial number was not supplied will be removed from the proxy.
+ type: list
+ required: False
+ default: []
+ suboptions:
+ ssid:
+ description:
+ - This is the Web Services Proxy's identifier for a storage system.
+ - When ssid is not specified then either the serial or first controller IPv4 address will be used instead.
+ type: str
+ required: false
+ serial:
+ description:
+ - Storage system's serial number which can be located on the top of every NetApp E-Series enclosure.
+ - Include any leading zeros.
+ - Mutually exclusive with the sub-option address.
+ type: str
+ required: false
+ addresses:
+ description:
+ - List of storage system's IPv4 addresses.
+ - Mutually exclusive with the sub-option serial.
+ type: list
+ required: false
+ password:
+ description:
+ - This is the storage system admin password.
+ - When not provided I(default_password) will be used.
+ - The storage system admin password will be set on the device itself with the provided admin password if it is not set.
+ type: str
+ required: false
+ tags:
+ description:
+ - Optional meta tags to associate to the storage system
+ type: dict
+ required: false
+ subnet_mask:
+ description:
+ - This is the IPv4 search range for discovering E-Series storage arrays.
+ - IPv4 subnet mask specified in CIDR form. Example 192.168.1.0/24 would search the range 192.168.1.0 to 192.168.1.255.
+ - Be sure to include all management paths in the search range.
+ type: str
+ required: false
+ password:
+ description:
+ - Default storage system password which will be used anytime when password has not been provided in the I(systems) sub-options.
+ - The storage system admin password will be set on the device itself with the provided admin password if it is not set.
+ type: str
+ required: false
+ tags:
+ description:
+ - Default meta tags to associate with all storage systems if not otherwise specified in I(systems) sub-options.
+ type: dict
+ required: false
+ accept_certificate:
+ description:
+ - Accept the storage system's certificate automatically even when it is self-signed.
+ - Use M(netapp_eseries.santricity.na_santricity_certificates) to add certificates to SANtricity Web Services Proxy.
+ - SANtricity Web Services Proxy will fail to add any untrusted storage system.
+ type: bool
+ required: false
+ default: true
+"""
+
+EXAMPLES = """
+---
+ - name: Add storage systems to SANtricity Web Services Proxy
+ na_santricity_proxy_systems:
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ subnet_mask: 192.168.1.0/24
+ password: password
+ tags:
+ tag: value
+ accept_certificate: True
+ systems:
+ - ssid: "system1"
+ serial: "056233035640"
+ password: "asecretpassword"
+ tags:
+ use: corporate
+ location: sunnyvale
+ - ssid: "system2"
+ addresses:
+ - 192.168.1.100
          - 192.168.2.100   # The second address is not required; it will be discovered.
+ password: "anothersecretpassword"
+ - serial: "021324673799"
+ - "021637323454"
+ - name: Add storage system to SANtricity Web Services Proxy with serial number list only. The serial numbers will be used to identify each system.
+ na_santricity_proxy_systems:
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ subnet_mask: 192.168.1.0/24
+ password: password
+ accept_certificate: True
+ systems:
+ - "1144FG123018"
+ - "721716500123"
+ - "123540006043"
+ - "112123001239"
+ - name: Add all discovered storage system to SANtricity Web Services Proxy found in the IP address range 192.168.1.0 to 192.168.1.255.
+ na_santricity_proxy_systems:
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ add_discovered_systems: True
+ subnet_mask: 192.168.1.0/24
+ password: password
+ accept_certificate: True
+"""
+RETURN = """
+msg:
+ description: Description of actions performed.
+ type: str
+ returned: always
+ sample: "Storage systems [system1, system2, 1144FG123018, 721716500123, 123540006043, 112123001239] were added."
+"""
+import json
+import threading
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+from time import sleep
+
+try:
+ import ipaddress
+except ImportError:
+ HAS_IPADDRESS = False
+else:
+ HAS_IPADDRESS = True
+
+
class NetAppESeriesProxySystems(NetAppESeriesModule):
    # Polling/timeout tuning for proxy REST interactions (seconds).
    DEFAULT_CONNECTION_TIMEOUT_SEC = 30
    DEFAULT_GRAPH_DISCOVERY_TIMEOUT = 30
    DEFAULT_PASSWORD_STATE_TIMEOUT = 30
    DEFAULT_DISCOVERY_TIMEOUT_SEC = 300

    def __init__(self):
        """Parse module arguments and normalize each requested system into a working record."""
        ansible_options = dict(add_discovered_systems=dict(type="bool", required=False, default=False),
                               subnet_mask=dict(type="str", required=False),
                               password=dict(type="str", required=False, default="", no_log=True),
                               tags=dict(type="dict", required=False),
                               accept_certificate=dict(type="bool", required=False, default=True),
                               systems=dict(type="list", required=False, default=[], suboptions=dict(ssid=dict(type="str", required=False),
                                                                                                    serial=dict(type="str", required=False),
                                                                                                    addresses=dict(type="list", required=False),
                                                                                                    password=dict(type="str", required=False, no_log=True),
                                                                                                    tags=dict(type="dict", required=False))))

        super(NetAppESeriesProxySystems, self).__init__(ansible_options=ansible_options,
                                                        web_services_version="04.10.0000.0000",
                                                        supports_check_mode=True,
                                                        proxy_specific_task=True)
        args = self.module.params
        self.add_discovered_systems = args["add_discovered_systems"]
        self.subnet_mask = args["subnet_mask"]
        self.accept_certificate = args["accept_certificate"]
        self.default_password = args["password"]

        # Convert the default tags dict into Web Services' [{"key": ..., "valueList": [...]}] form.
        self.default_meta_tags = []
        if "tags" in args and args["tags"]:
            for key in args["tags"].keys():
                if isinstance(args["tags"][key], list):
                    self.default_meta_tags.append({"key": key, "valueList": args["tags"][key]})
                else:
                    self.default_meta_tags.append({"key": key, "valueList": [args["tags"][key]]})
            self.default_meta_tags = sorted(self.default_meta_tags, key=lambda x: x["key"])

        self.undiscovered_systems = []
        self.systems_to_remove = []
        self.systems_to_update = []
        self.systems_to_add = []

        self.serial_numbers = []
        self.systems = []
        if args["systems"]:
            for system in args["systems"]:

                if isinstance(system, str):  # system is a serial number
                    self.serial_numbers.append(system)
                    self.systems.append({"ssid": system,
                                         "serial": system,
                                         "password": self.default_password,
                                         "password_valid": None,
                                         "password_set": None,
                                         "stored_password_valid": None,
                                         "meta_tags": self.default_meta_tags,
                                         "controller_addresses": [],
                                         "embedded_available": None,
                                         "accept_certificate": False,
                                         "current_info": {},
                                         "changes": {},
                                         "updated_required": False,
                                         "failed": False,
                                         "discovered": False})
                elif isinstance(system, dict):  # system is a dictionary of system details
                    if "ssid" not in system:
                        # Fall back to serial, then first address, as the proxy identifier.
                        if "serial" in system and system["serial"]:
                            system.update({"ssid": system["serial"]})
                        elif "addresses" in system and system["addresses"]:
                            system.update({"ssid": system["addresses"][0]})
                    if "password" not in system:
                        system.update({"password": self.default_password})

                    if "serial" in system and system["serial"]:
                        self.serial_numbers.append(system["serial"])

                    # Structure meta tags for Web Services.
                    # BUGFIX: copy the default tag list -- appending to the shared
                    # self.default_meta_tags leaked per-system tags into every system.
                    meta_tags = list(self.default_meta_tags)
                    # BUGFIX: the documented suboption key is "tags"; the old code only
                    # looked for "meta_tags" (kept here as a fallback for compatibility),
                    # silently discarding per-system tags.
                    system_tags = system.get("tags") or system.get("meta_tags")
                    if system_tags:
                        for key in system_tags.keys():
                            if isinstance(system_tags[key], list):
                                meta_tags.append({"key": key, "valueList": system_tags[key]})
                            else:
                                meta_tags.append({"key": key, "valueList": [system_tags[key]]})
                        meta_tags = sorted(meta_tags, key=lambda x: x["key"])

                    self.systems.append({"ssid": str(system["ssid"]),
                                         "serial": system["serial"] if "serial" in system else "",
                                         "password": system["password"],
                                         "password_valid": None,
                                         "password_set": None,
                                         "stored_password_valid": None,
                                         "meta_tags": meta_tags,
                                         "controller_addresses": system["addresses"] if "addresses" in system else [],
                                         "embedded_available": None,
                                         "accept_certificate": False,
                                         "current_info": {},
                                         "changes": {},
                                         "updated_required": False,
                                         "failed": False,
                                         "discovered": False})
                else:
                    self.module.fail_json(msg="Invalid system! All systems must either be a simple serial number or a dictionary. Failed system: %s" % system)

        # Update default request headers: skip proxy-side password validation per request.
        self.DEFAULT_HEADERS.update({"x-netapp-password-validate-method": "none"})
+
def discover_array(self):
    """Run proxy-side discovery over the configured subnet and correlate results.

    Posts a discovery request covering the whole I(subnet_mask) range, polls
    until it completes (up to DEFAULT_DISCOVERY_TIMEOUT_SEC seconds), optionally
    appends newly discovered systems when add_discovered_systems is set, and
    fills in controller addresses/embedded availability for expected systems.
    Expected systems that are not discovered are recorded in
    self.undiscovered_systems (they are handled later, not removed here).
    """
    subnet = ipaddress.ip_network(u"%s" % self.subnet_mask)

    try:
        rc, request_id = self.request("discovery", method="POST", data={"startIP": str(subnet[0]), "endIP": str(subnet[-1]),
                                                                        "connectionTimeout": self.DEFAULT_CONNECTION_TIMEOUT_SEC})

        # Wait for discovery to complete
        discovered_systems = None
        try:
            for iteration in range(self.DEFAULT_DISCOVERY_TIMEOUT_SEC):
                rc, discovered_systems = self.request("discovery?requestId=%s" % request_id["requestId"])
                if not discovered_systems["discoverProcessRunning"]:
                    break
                sleep(1)
            else:
                self.module.fail_json(msg="Timeout waiting for array discovery process. Subnet [%s]" % self.subnet_mask)
        except Exception as error:
            self.module.fail_json(msg="Failed to get the discovery results. Error [%s]." % to_native(error))

        if not discovered_systems:
            self.module.warn("Discovery found no systems. IP starting address [%s]. IP ending address: [%s]." % (str(subnet[0]), str(subnet[-1])))
        else:
            # Add all newly discovered systems; serials already supplied are skipped to prevent duplicates.
            if self.add_discovered_systems:
                for discovered_system in discovered_systems["storageSystems"]:
                    if discovered_system["serialNumber"] not in self.serial_numbers:
                        self.systems.append({"ssid": discovered_system["serialNumber"],
                                             "serial": discovered_system["serialNumber"],
                                             "password": self.default_password,
                                             "password_valid": None,
                                             "password_set": None,
                                             "stored_password_valid": None,
                                             "meta_tags": self.default_meta_tags,
                                             "controller_addresses": [],
                                             "embedded_available": None,
                                             "accept_certificate": False,
                                             "current_info": {},
                                             "changes": {},
                                             "updated_required": False,
                                             "failed": False,
                                             "discovered": False})

            # Update controller_addresses: match each expected system either by serial or
            # because all of its supplied addresses appear in one discovery result.
            for system in self.systems:
                for discovered_system in discovered_systems["storageSystems"]:
                    if (system["serial"] == discovered_system["serialNumber"] or
                            (system["controller_addresses"] and
                             all([address in discovered_system["ipAddresses"] for address in system["controller_addresses"]]))):
                        system["controller_addresses"] = sorted(discovered_system["ipAddresses"])
                        system["embedded_available"] = "https" in discovered_system["supportedManagementPorts"]
                        system["accept_certificate"] = system["embedded_available"] and self.accept_certificate
                        system["discovered"] = True
                        break
                else:  # System was not discovered: record it for later handling (kept in self.systems).
                    self.undiscovered_systems.append(system["ssid"])
                    # self.systems.remove(system)

    except Exception as error:
        self.module.fail_json(msg="Failed to initiate array discovery. Error [%s]." % to_native(error))
+
def update_storage_systems_info(self):
    """Get current web services proxy storage systems and classify expected systems.

    Captures each known system's proxy record in its "current_info", queues
    expected-but-absent systems in self.systems_to_add, and queues proxy systems
    not in the expected list in self.systems_to_remove.  Systems in an unusable
    password state are marked failed and skipped with a warning.
    """
    try:
        rc, existing_systems = self.request("storage-systems")

        # Mark systems for adding or removing
        for system in self.systems:
            for existing_system in existing_systems:
                if system["ssid"] == existing_system["id"]:
                    system["current_info"] = existing_system

                    if system["current_info"]["passwordStatus"] in ["unknown", "securityLockout"]:
                        system["failed"] = True
                        self.module.warn("Skipping storage system [%s] because of current password status [%s]"
                                         % (system["ssid"], system["current_info"]["passwordStatus"]))
                    # Sort tags so later comparisons against expected (sorted) tags are order-insensitive.
                    if system["current_info"]["metaTags"]:
                        system["current_info"]["metaTags"] = sorted(system["current_info"]["metaTags"], key=lambda x: x["key"])
                    break
            else:
                self.systems_to_add.append(system)

        # Mark systems for removing
        for existing_system in existing_systems:
            for system in self.systems:
                if existing_system["id"] == system["ssid"]:

                    # Leave existing but undiscovered storage systems alone and throw a warning.
                    if existing_system["id"] in self.undiscovered_systems:
                        self.undiscovered_systems.remove(existing_system["id"])
                        self.module.warn("Expected storage system exists on the proxy but was failed to be discovered. Array [%s]." % existing_system["id"])
                    break
            else:
                self.systems_to_remove.append(existing_system["id"])
    except Exception as error:
        self.module.fail_json(msg="Failed to retrieve storage systems. Error [%s]." % to_native(error))
+
def set_password(self, system):
    """Determine whether password has been set and, if it hasn't been set, set it.

    Probes the system's first controller over the common embedded web services
    endpoints (8443/443 https, 8080 http).  A 200 from the password-less login
    check means no admin password is set, in which case the desired password is
    applied; a 401 means a password is already set.  Failures mark the system
    record as failed rather than failing the whole module.
    """
    if system["embedded_available"] and system["controller_addresses"]:
        for url in ["https://%s:8443/devmgr" % system["controller_addresses"][0],
                    "https://%s:443/devmgr" % system["controller_addresses"][0],
                    "http://%s:8080/devmgr" % system["controller_addresses"][0]]:
            try:
                rc, response = self._request("%s/utils/login?uid=admin&xsrf=false&onlycheck=true" % url, ignore_errors=True, url_username="admin",
                                             url_password="", validate_certs=False)

                if rc == 200:  # successful login without password
                    system["password_set"] = False
                    if system["password"]:
                        try:
                            # NOTE(review): "adminPassword": True appears to select the admin
                            # (vs monitor) password slot -- confirm against the embedded API.
                            rc, storage_system = self._request("%s/v2/storage-systems/1/passwords" % url, method="POST", url_username="admin",
                                                               headers=self.DEFAULT_HEADERS, url_password="", validate_certs=False,
                                                               data=json.dumps({"currentAdminPassword": "", "adminPassword": True,
                                                                                "newPassword": system["password"]}))

                        except Exception as error:
                            system["failed"] = True
                            self.module.warn("Failed to set storage system password. Array [%s]." % system["ssid"])
                    break

                elif rc == 401:  # unauthorized
                    system["password_set"] = True
                    break
            except Exception as error:
                # Endpoint unreachable -- fall through and try the next URL candidate.
                pass
        else:
            self.module.warn("Failed to retrieve array password state. Array [%s]." % system["ssid"])
            system["failed"] = True
+
def update_system_changes(self, system):
    """Determine whether storage system configuration changes are required.

    Populates system["changes"] with any required updates (management paths,
    meta tags, certificate acceptance) and, when changes exist for a discovered
    system, queues the system in self.systems_to_update.
    """
    if system["current_info"]:
        system["changes"] = dict()

        # Check if management paths should be updated
        if (sorted(system["controller_addresses"]) != sorted(system["current_info"]["managementPaths"]) or
                system["current_info"]["ip1"] not in system["current_info"]["managementPaths"] or
                system["current_info"]["ip2"] not in system["current_info"]["managementPaths"]):
            system["changes"].update({"controllerAddresses": system["controller_addresses"]})

        # Check for expected meta tag count
        if len(system["meta_tags"]) != len(system["current_info"]["metaTags"]):
            if len(system["meta_tags"]) == 0:
                system["changes"].update({"removeAllTags": True})
            else:
                system["changes"].update({"metaTags": system["meta_tags"]})

        # Check for expected meta tag key-values (both sides are pre-sorted by key)
        else:
            for index in range(len(system["meta_tags"])):
                if (system["current_info"]["metaTags"][index]["key"] != system["meta_tags"][index]["key"] or
                        sorted(system["current_info"]["metaTags"][index]["valueList"]) != sorted(system["meta_tags"][index]["valueList"])):
                    system["changes"].update({"metaTags": system["meta_tags"]})
                    break

        # Check whether CA certificate should be accepted
        if system["accept_certificate"] and not all([controller["certificateStatus"] == "trusted" for controller in system["current_info"]["controllers"]]):
            system["changes"].update({"acceptCertificate": True})

        # BUGFIX: system records are keyed "ssid"; the old system["id"] lookup
        # raised KeyError whenever changes were pending.
        if system["ssid"] not in self.undiscovered_systems and system["changes"]:
            self.systems_to_update.append(system)
+
def add_system(self, system):
    """Add basic storage system definition to the web services proxy.

    Ensures the device's admin password is set first, registers the system with
    the proxy, then retries password validation briefly so the proxy records a
    usable password state.  All failures are warnings; the run continues.
    """
    self.set_password(system)

    body = {"id": system["ssid"],
            "controllerAddresses": system["controller_addresses"],
            "password": system["password"]}
    if system["accept_certificate"]:  # Set only if embedded is available and accept_certificates==True
        body.update({"acceptCertificate": system["accept_certificate"]})
    if system["meta_tags"]:
        body.update({"metaTags": system["meta_tags"]})

    try:
        rc, storage_system = self.request("storage-systems", method="POST", data=body)
    except Exception as error:
        self.module.warn("Failed to add storage system. Array [%s]. Error [%s]" % (system["ssid"], to_native(error)))
        return  # Skip the password validation.

    # Ensure the password is validated.
    # BUGFIX: Python 3 deletes the "except ... as error" name at the end of the
    # except clause, so referencing it in the for/else raised NameError; keep an
    # explicit reference to the last failure instead.
    last_error = None
    for retries in range(5):
        sleep(1)
        try:
            rc, storage_system = self.request("storage-systems/%s/validatePassword" % system["ssid"], method="POST")
            break
        except Exception as error:
            last_error = error
            continue
    else:
        self.module.warn("Failed to validate password status. Array [%s]. Error [%s]" % (system["ssid"], to_native(last_error)))
+
def update_system(self, system):
    """Push this system's pending configuration changes to the proxy."""
    try:
        rc, response = self.request("storage-systems/%s" % system["ssid"], method="POST", data=system["changes"])
    except Exception as error:
        self.module.warn("Failed to update storage system. Array [%s]. Error [%s]" % (system["ssid"], to_native(error)))
+
def remove_system(self, ssid):
    """Delete the identified storage system from the proxy; warn on failure."""
    try:
        rc, response = self.request("storage-systems/%s" % ssid, method="DELETE")
    except Exception as error:
        self.module.warn("Failed to remove storage system. Array [%s]. Error [%s]." % (ssid, to_native(error)))
+
def apply(self):
    """Determine whether changes are required and, if necessary, apply them.

    Orchestrates discovery, classification, and the remove/add/update phases.
    Per-system work is fanned out on threads; removals complete before
    additions/updates begin.  Exits (or fails, when systems were undiscovered)
    with a human-readable summary of what was done.
    """
    if self.is_embedded():
        self.module.fail_json(msg="Cannot add/remove storage systems to SANtricity Web Services Embedded instance.")

    if self.add_discovered_systems or self.systems:
        if self.subnet_mask:
            self.discover_array()
        self.update_storage_systems_info()

        # Determine whether the storage system requires updating (one thread per system).
        thread_pool = []
        for system in self.systems:
            if not system["failed"]:
                thread = threading.Thread(target=self.update_system_changes, args=(system,))
                thread_pool.append(thread)
                thread.start()
        for thread in thread_pool:
            thread.join()
    else:
        # No systems requested: classification alone marks everything existing for removal.
        self.update_storage_systems_info()

    changes_required = False
    if self.systems_to_add or self.systems_to_update or self.systems_to_remove:
        changes_required = True

    if changes_required and not self.module.check_mode:
        add_msg = ""
        update_msg = ""
        remove_msg = ""

        # Remove storage systems (joined before adds/updates start).
        if self.systems_to_remove:
            ssids = []
            thread_pool = []
            for ssid in self.systems_to_remove:
                thread = threading.Thread(target=self.remove_system, args=(ssid,))
                thread_pool.append(thread)
                thread.start()
                ssids.append(ssid)
            for thread in thread_pool:
                thread.join()
            if ssids:
                remove_msg = "system%s removed: %s" % ("s" if len(ssids) > 1 else "", ", ".join(ssids))

        thread_pool = []

        # Add storage systems
        if self.systems_to_add:
            ssids = []
            for system in self.systems_to_add:
                if not system["failed"]:
                    thread = threading.Thread(target=self.add_system, args=(system,))
                    thread_pool.append(thread)
                    thread.start()
                    ssids.append(system["ssid"])
            if ssids:
                add_msg = "system%s added: %s" % ("s" if len(ssids) > 1 else "", ", ".join(ssids))

        # Update storage systems
        if self.systems_to_update:
            ssids = []
            for system in self.systems_to_update:
                if not system["failed"]:
                    thread = threading.Thread(target=self.update_system, args=(system,))
                    thread_pool.append(thread)
                    thread.start()
                    ssids.append(system["ssid"])
            if ssids:
                update_msg = "system%s updated: %s" % ("s" if len(ssids) > 1 else "", ", ".join(ssids))

        # Wait for storage systems to be added or updated
        for thread in thread_pool:
            thread.join()

        # Report module actions; undiscovered systems turn the result into a failure.
        if self.undiscovered_systems:
            undiscovered_msg = "system%s undiscovered: %s" % ("s " if len(self.undiscovered_systems) > 1 else "", ", ".join(self.undiscovered_systems))
            self.module.fail_json(msg=(", ".join([msg for msg in [add_msg, update_msg, remove_msg, undiscovered_msg] if msg])), changed=changes_required)

        self.module.exit_json(msg=", ".join([msg for msg in [add_msg, update_msg, remove_msg] if msg]), changed=changes_required)

    # Report no changes
    if self.undiscovered_systems:
        self.module.fail_json(msg="No changes were made; however the following system(s) failed to be discovered: %s."
                                  % self.undiscovered_systems, changed=changes_required)
    self.module.exit_json(msg="No changes were made.", changed=changes_required)
+
+
def main():
    """Module entry point: build the module object and enforce the desired state."""
    NetAppESeriesProxySystems().apply()


if __name__ == "__main__":
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_storagepool.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_storagepool.py
new file mode 100644
index 00000000..a1307d59
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_storagepool.py
@@ -0,0 +1,926 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_storagepool
+short_description: NetApp E-Series manage volume groups and disk pools
+description: Create or remove volume groups and disk pools for NetApp E-series storage arrays.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - Whether the specified storage pool should exist or not.
+ - Note that removing a storage pool currently requires the removal of all defined volumes first.
+ type: str
+ choices: ["present", "absent"]
+ default: "present"
+ name:
+ description:
+ - The name of the storage pool to manage
+ type: str
+ required: true
+ criteria_drive_count:
+ description:
+ - The number of disks to use for building the storage pool.
+ - When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified.
+ - The pool will be expanded if this number exceeds the number of disks already in place (See expansion note below)
+ type: int
+ required: false
+ criteria_min_usable_capacity:
+ description:
+ - The minimum size of the storage pool (in size_unit).
+ - When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified.
+ - The pool will be expanded if this value exceeds its current size. (See expansion note below)
+ type: float
+ required: false
+ criteria_drive_type:
+ description:
+ - The type of disk (hdd or ssd) to use when searching for candidates to use.
+ - When not specified each drive type will be evaluated until successful drive candidates are found starting with
+ the most prevalent drive type.
+ type: str
+ choices: ["hdd","ssd"]
+ required: false
+ criteria_size_unit:
+ description:
+ - The unit used to interpret size parameters
+ type: str
+ choices: ["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"]
+ default: "gb"
+ required: false
+ criteria_drive_min_size:
+ description:
+ - The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
+ type: float
+ required: false
+ criteria_drive_interface_type:
+ description:
+ - The interface type to use when selecting drives for the storage pool
+ - If not provided then all interface types will be considered.
+ type: str
+ choices: ["sas", "sas4k", "fibre", "fibre520b", "scsi", "sata", "pata"]
+ required: false
+ criteria_drive_require_da:
+ description:
+ - Ensures the storage pool will be created with only data assurance (DA) capable drives.
+ - Only available for new storage pools; existing storage pools cannot be converted.
+ type: bool
+ default: false
+ required: false
+ criteria_drive_require_fde:
+ description:
+ - Whether full disk encryption ability is required for drives to be added to the storage pool
+ type: bool
+ default: false
+ required: false
+ raid_level:
+ description:
+ - The RAID level of the storage pool to be created.
+ - Required only when I(state=="present").
+ - When I(raid_level=="raidDiskPool") then I(criteria_drive_count >= 10 or criteria_drive_count >= 11) is required
+ depending on the storage array specifications.
+ - When I(raid_level=="raid0") then I(1<=criteria_drive_count) is required.
+ - When I(raid_level=="raid1") then I(2<=criteria_drive_count) is required.
+ - When I(raid_level=="raid3") then I(3<=criteria_drive_count<=30) is required.
+ - When I(raid_level=="raid5") then I(3<=criteria_drive_count<=30) is required.
+ - When I(raid_level=="raid6") then I(5<=criteria_drive_count<=30) is required.
+ - Note that raidAll will be treated as raidDiskPool and raid3 as raid5.
+ type: str
+ default: "raidDiskPool"
+ choices: ["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"]
+ required: false
+ secure_pool:
+ description:
+ - Enables security at rest feature on the storage pool.
+ - Will only work if all drives in the pool are security capable (FDE, FIPS, or mix)
+ - Warning, once security is enabled it is impossible to disable without erasing the drives.
+ type: bool
+ required: false
+ reserve_drive_count:
+ description:
+ - Set the number of drives reserved by the storage pool for reconstruction operations.
+ - Only valid on raid disk pools.
+ type: int
+ required: false
+ remove_volumes:
+ description:
+ - Prior to removing a storage pool, delete all volumes in the pool.
+ type: bool
+ default: true
+ required: false
+ erase_secured_drives:
+ description:
      - If I(state=="absent") then all storage pool drives will be erased.
+ - If I(state=="present") then delete all available storage array drives that have security enabled.
+ type: bool
+ default: true
+ required: false
+notes:
+ - The expansion operations are non-blocking due to the time consuming nature of expanding volume groups
+ - Traditional volume groups (raid0, raid1, raid5, raid6) are performed in steps dictated by the storage array. Each
+ required step will be attempted until the request fails which is likely because of the required expansion time.
+ - raidUnsupported will be treated as raid0, raidAll as raidDiskPool and raid3 as raid5.
+ - Tray loss protection and drawer loss protection will be chosen if at all possible.
+"""
+EXAMPLES = """
+- name: No disk groups
+ na_santricity_storagepool:
+ ssid: "{{ ssid }}"
+ name: "{{ item }}"
+ state: absent
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+"""
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: str
+ sample: Json facts for the pool that was created.
+"""
+import functools
+from itertools import groupby
+from time import sleep
+
+from pprint import pformat
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+
+
def get_most_common_elements(iterator):
    """Return (element, count) pairs ordered from most to least common.

    Ties keep ascending element order because Python's sort is stable.

    :param iterator: list of hashable, orderable elements.
    :raises TypeError: when the argument is not a list.
    """
    if not isinstance(iterator, list):
        raise TypeError("iterator must be a list.")

    tallies = {}
    for element in sorted(iterator):
        tallies[element] = tallies.get(element, 0) + 1
    return sorted(tallies.items(), key=lambda pair: pair[1], reverse=True)
+
+
def memoize(func):
    """Generic memoizer for any function with any number of arguments including zero.

    The cache lives in the decorator's closure so results persist across calls.
    (The previous implementation rebuilt its cache dict on every invocation and
    therefore never actually memoized anything; it also collapsed all calls that
    supplied only positional arguments onto a single key.)
    """
    cache = {}

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # str() keys keep unhashable argument values usable; for bound methods the
        # instance repr (including its id) lands in the key, separating instances.
        key = str((args, kwargs)) if args or kwargs else "no_argument_response"
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]

    return wrapper
+
+
class NetAppESeriesStoragePool(NetAppESeriesModule):
    # Seconds spent polling action-progress between chained expansion requests.
    EXPANSION_TIMEOUT_SEC = 10
    # Fallback disk-pool minimum drive count when the array does not report one.
    DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11
+
    def __init__(self):
        """Declare module options, parse them, and normalize derived settings.

        All capacity criteria are converted to bytes, and unused raid level
        aliases are remapped (raidAll -> raidDiskPool, raid3 -> raid5) to
        match the documented behavior.
        """
        # Minimum SANtricity web services API version this module requires.
        version = "02.00.0000.0000"
        ansible_options = dict(
            state=dict(choices=["present", "absent"], default="present", type="str"),
            name=dict(required=True, type="str"),
            criteria_size_unit=dict(choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"],
                                    default="gb", type="str"),
            criteria_drive_count=dict(type="int"),
            criteria_drive_interface_type=dict(choices=["sas", "sas4k", "fibre", "fibre520b", "scsi", "sata", "pata"],
                                               type="str"),
            criteria_drive_type=dict(choices=["ssd", "hdd"], type="str", required=False),
            criteria_drive_min_size=dict(type="float"),
            criteria_drive_require_da=dict(type="bool", required=False),
            criteria_drive_require_fde=dict(type="bool", required=False),
            criteria_min_usable_capacity=dict(type="float"),
            raid_level=dict(choices=["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"],
                            default="raidDiskPool"),
            erase_secured_drives=dict(type="bool", default=True),
            secure_pool=dict(type="bool", default=False),
            reserve_drive_count=dict(type="int"),
            remove_volumes=dict(type="bool", default=True))

        # raid_level is only meaningful (and therefore required) when creating/maintaining a pool.
        required_if = [["state", "present", ["raid_level"]]]
        super(NetAppESeriesStoragePool, self).__init__(ansible_options=ansible_options,
                                                       web_services_version=version,
                                                       supports_check_mode=True,
                                                       required_if=required_if)

        args = self.module.params
        self.state = args["state"]
        self.ssid = args["ssid"]
        self.name = args["name"]
        self.criteria_drive_count = args["criteria_drive_count"]
        self.criteria_min_usable_capacity = args["criteria_min_usable_capacity"]
        self.criteria_size_unit = args["criteria_size_unit"]
        self.criteria_drive_min_size = args["criteria_drive_min_size"]
        self.criteria_drive_type = args["criteria_drive_type"]
        self.criteria_drive_interface_type = args["criteria_drive_interface_type"]
        self.criteria_drive_require_fde = args["criteria_drive_require_fde"]
        self.criteria_drive_require_da = args["criteria_drive_require_da"]
        self.raid_level = args["raid_level"]
        self.erase_secured_drives = args["erase_secured_drives"]
        self.secure_pool = args["secure_pool"]
        self.reserve_drive_count = args["reserve_drive_count"]
        self.remove_volumes = args["remove_volumes"]
        # Cached storage pool record; populated in apply() and refreshed after mutations.
        self.pool_detail = None

        # Change all sizes to be measured in bytes
        if self.criteria_min_usable_capacity:
            self.criteria_min_usable_capacity = int(self.criteria_min_usable_capacity *
                                                    self.SIZE_UNIT_MAP[self.criteria_size_unit])
        if self.criteria_drive_min_size:
            self.criteria_drive_min_size = int(self.criteria_drive_min_size *
                                               self.SIZE_UNIT_MAP[self.criteria_size_unit])
        self.criteria_size_unit = "bytes"

        # Adjust unused raid level option to reflect documentation
        if self.raid_level == "raidAll":
            self.raid_level = "raidDiskPool"
        if self.raid_level == "raid3":
            self.raid_level = "raid5"
+
+ @property
+ @memoize
+ def available_drives(self):
+ """Determine the list of available drives"""
+ return [drive["id"] for drive in self.drives if drive["available"] and drive["status"] == "optimal"]
+
+ @property
+ @memoize
+ def available_drive_types(self):
+ """Determine the types of available drives sorted by the most common first."""
+ types = [drive["driveMediaType"] for drive in self.drives]
+ return [entry[0] for entry in get_most_common_elements(types)]
+
+ @property
+ @memoize
+ def available_drive_interface_types(self):
+ """Determine the types of available drives."""
+ interfaces = [drive["phyDriveType"] for drive in self.drives]
+ return [entry[0] for entry in get_most_common_elements(interfaces)]
+
+ @property
+ def storage_pool_drives(self):
+ """Retrieve list of drives found in storage pool."""
+ return [drive for drive in self.drives if drive["currentVolumeGroupRef"] == self.pool_detail["id"] and not drive["hotSpare"]]
+
    @property
    def expandable_drive_count(self):
        """Maximum number of drives that a storage pool can be expended at a given time."""
        capabilities = None
        # Disk pools have no per-request batch limit: any available drive may be added at once.
        if self.raid_level == "raidDiskPool":
            return len(self.available_drives)

        try:
            rc, capabilities = self.request("storage-systems/%s/capabilities" % self.ssid)
        except Exception as error:
            self.module.fail_json(msg="Failed to fetch maximum expandable drive count. Array id [%s]. Error[%s]."
                                  % (self.ssid, to_native(error)))

        # Traditional volume groups are limited by the array-reported DCE maximum.
        return capabilities["featureParameters"]["maxDCEDrives"]
+
    @property
    def disk_pool_drive_minimum(self):
        """Provide the storage array's minimum disk pool drive count."""
        # ignore_errors: a failed lookup simply falls back to the module default below.
        rc, attr = self.request("storage-systems/%s/symbol/getSystemAttributeDefaults" % self.ssid, ignore_errors=True)

        # Standard minimum is 11 drives but some arrays allow 10. Fall back to the module
        # default (DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11) when the array reports
        # nothing usable (request failed, key missing, or zero).
        if (rc != 200 or "minimumDriveCount" not in attr["defaults"]["diskPoolDefaultAttributes"].keys() or
                attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"] == 0):
            return self.DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT

        return attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"]
+
+ def get_available_drive_capacities(self, drive_id_list=None):
+ """Determine the list of available drive capacities."""
+ if drive_id_list:
+ available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
+ if drive["id"] in drive_id_list and drive["available"] and
+ drive["status"] == "optimal"])
+ else:
+ available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
+ if drive["available"] and drive["status"] == "optimal"])
+
+ self.module.log("available drive capacities: %s" % available_drive_capacities)
+ return list(available_drive_capacities)
+
+ @property
+ def drives(self):
+ """Retrieve list of drives found in storage pool."""
+ drives = None
+ try:
+ rc, drives = self.request("storage-systems/%s/drives" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to fetch disk drives. Array id [%s]. Error[%s]."
+ % (self.ssid, to_native(error)))
+
+ return drives
+
+ def is_drive_count_valid(self, drive_count):
+ """Validate drive count criteria is met."""
+ if self.criteria_drive_count and drive_count < self.criteria_drive_count:
+ return False
+
+ if self.raid_level == "raidDiskPool":
+ return drive_count >= self.disk_pool_drive_minimum
+ if self.raid_level == "raid0":
+ return drive_count > 0
+ if self.raid_level == "raid1":
+ return drive_count >= 2 and (drive_count % 2) == 0
+ if self.raid_level in ["raid3", "raid5"]:
+ return 3 <= drive_count <= 30
+ if self.raid_level == "raid6":
+ return 5 <= drive_count <= 30
+ return False
+
+ @property
+ def storage_pool(self):
+ """Retrieve storage pool information."""
+ storage_pools_resp = None
+ try:
+ rc, storage_pools_resp = self.request("storage-systems/%s/storage-pools" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]."
+ % (self.ssid, to_native(err), self.state))
+
+ pool_detail = [pool for pool in storage_pools_resp if pool["name"] == self.name]
+ return pool_detail[0] if pool_detail else dict()
+
+ @property
+ def storage_pool_volumes(self):
+ """Retrieve list of volumes associated with storage pool."""
+ volumes_resp = None
+ try:
+ rc, volumes_resp = self.request("storage-systems/%s/volumes" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]."
+ % (self.ssid, to_native(err), self.state))
+
+ group_ref = self.storage_pool["volumeGroupRef"]
+ storage_pool_volume_list = [volume["id"] for volume in volumes_resp if volume["volumeGroupRef"] == group_ref]
+ return storage_pool_volume_list
+
    def get_ddp_capacity(self, expansion_drive_list):
        """Return the total usable capacity based on the additional drives.

        Models the dynamic disk pool (DDP) stripe layout to estimate the usable
        bytes for the pool's current drives plus I(expansion_drive_list).
        """

        def get_ddp_error_percent(_drive_count, _extent_count):
            """Determine the space reserved for reconstruction"""
            # Lookup table of reserved-space fractions keyed by drive count, then extent count.
            if _drive_count <= 36:
                if _extent_count <= 600:
                    return 0.40
                elif _extent_count <= 1400:
                    return 0.35
                elif _extent_count <= 6200:
                    return 0.20
                elif _extent_count <= 50000:
                    return 0.15
            elif _drive_count <= 64:
                if _extent_count <= 600:
                    return 0.20
                elif _extent_count <= 1400:
                    return 0.15
                elif _extent_count <= 6200:
                    return 0.10
                elif _extent_count <= 50000:
                    return 0.05
            elif _drive_count <= 480:
                if _extent_count <= 600:
                    return 0.20
                elif _extent_count <= 1400:
                    return 0.15
                elif _extent_count <= 6200:
                    return 0.10
                elif _extent_count <= 50000:
                    return 0.05

            # Reached when the drive count or extent count exceeds every table entry.
            self.module.fail_json(msg="Drive count exceeded the error percent table. Array[%s]" % self.ssid)

        def get_ddp_reserved_drive_count(_disk_count):
            """Determine the number of reserved drive."""
            reserve_count = 0

            # An explicit reserve_drive_count option overrides the size-based defaults below.
            if self.reserve_drive_count:
                reserve_count = self.reserve_drive_count
            elif _disk_count >= 256:
                reserve_count = 8
            elif _disk_count >= 192:
                reserve_count = 7
            elif _disk_count >= 128:
                reserve_count = 6
            elif _disk_count >= 64:
                reserve_count = 4
            elif _disk_count >= 32:
                reserve_count = 3
            elif _disk_count >= 12:
                reserve_count = 2
            elif _disk_count == 11:
                reserve_count = 1

            return reserve_count

        # Existing pools expand by the supplied drives; a new pool consists solely of them.
        if self.pool_detail:
            drive_count = len(self.storage_pool_drives) + len(expansion_drive_list)
        else:
            drive_count = len(expansion_drive_list)

        # Extent math uses the smallest usable drive capacity across all candidates.
        # NOTE(review): 8053063680 presumably is fixed per-drive overhead and 536870912
        # (512 MiB) the DDP extent size -- confirm against DDP documentation.
        drive_usable_capacity = min(min(self.get_available_drive_capacities()),
                                    min(self.get_available_drive_capacities(expansion_drive_list)))
        drive_data_extents = ((drive_usable_capacity - 8053063680) / 536870912)
        maximum_stripe_count = (drive_count * drive_data_extents) / 10

        error_percent = get_ddp_error_percent(drive_count, drive_data_extents)
        error_overhead = (drive_count * drive_data_extents / 10 * error_percent + 10) / 10

        total_stripe_count = maximum_stripe_count - error_overhead
        stripe_count_per_drive = total_stripe_count / drive_count
        reserved_stripe_count = get_ddp_reserved_drive_count(drive_count) * stripe_count_per_drive
        available_stripe_count = total_stripe_count - reserved_stripe_count

        # Each stripe contributes 4294967296 bytes (4 GiB) of usable capacity.
        return available_stripe_count * 4294967296
+
    @memoize
    def get_candidate_drives(self):
        """Retrieve set of drives candidates for creating a new storage pool.

        Candidates are requested for each interface/media type combination, sorted to
        prefer tray/drawer loss protection, and the first candidate satisfying the
        drive-count, usable-capacity and minimum-drive-size criteria is returned.
        Fails the module when no candidate qualifies.
        """

        def get_candidate_drive_request():
            """Perform request for new volume creation."""

            candidates_list = list()
            # When criteria are unspecified, try every available type, most prevalent first.
            drive_types = [self.criteria_drive_type] if self.criteria_drive_type else self.available_drive_types
            interface_types = [self.criteria_drive_interface_type] \
                if self.criteria_drive_interface_type else self.available_drive_interface_types

            for interface_type in interface_types:
                for drive_type in drive_types:
                    candidates = None
                    # reconstructionReservedDriveCount=65535 lets the array pick its own default.
                    volume_candidate_request_data = dict(
                        type="diskPool" if self.raid_level == "raidDiskPool" else "traditional",
                        diskPoolVolumeCandidateRequestData=dict(
                            reconstructionReservedDriveCount=65535))
                    candidate_selection_type = dict(
                        candidateSelectionType="count",
                        driveRefList=dict(driveRef=self.available_drives))
                    criteria = dict(raidLevel=self.raid_level,
                                    phyDriveType=interface_type,
                                    dssPreallocEnabled=False,
                                    securityType="capable" if self.criteria_drive_require_fde else "none",
                                    driveMediaType=drive_type,
                                    onlyProtectionInformationCapable=True if self.criteria_drive_require_da else False,
                                    volumeCandidateRequestData=volume_candidate_request_data,
                                    allocateReserveSpace=False,
                                    securityLevel="fde" if self.criteria_drive_require_fde else "none",
                                    candidateSelectionType=candidate_selection_type)

                    try:
                        rc, candidates = self.request("storage-systems/%s/symbol/getVolumeCandidates?verboseError"
                                                      "Response=true" % self.ssid, data=criteria, method="POST")
                    except Exception as error:
                        self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]."
                                              % (self.ssid, to_native(error)))

                    if candidates:
                        candidates_list.extend(candidates["volumeCandidate"])

            # Sort so candidates offering tray/drawer loss protection are evaluated first.
            if candidates_list:
                def candidate_sort_function(entry):
                    """Orders candidates based on tray/drawer loss protection."""
                    preference = 3
                    if entry["drawerLossProtection"]:
                        preference -= 1
                    if entry["trayLossProtection"]:
                        preference -= 2
                    return preference
                candidates_list.sort(key=candidate_sort_function)

            return candidates_list

        # Determine the appropriate candidate list
        for candidate in get_candidate_drive_request():

            # Evaluate candidates for required drive count, collective drive usable capacity and minimum drive size
            if self.criteria_drive_count:
                if self.criteria_drive_count != int(candidate["driveCount"]):
                    continue
            if self.criteria_min_usable_capacity:
                if ((self.raid_level == "raidDiskPool" and self.criteria_min_usable_capacity >
                     self.get_ddp_capacity(candidate["driveRefList"]["driveRef"])) or
                        self.criteria_min_usable_capacity > int(candidate["usableSize"])):
                    continue
            if self.criteria_drive_min_size:
                if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["driveRefList"]["driveRef"])):
                    continue

            return candidate

        self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid)
+
    @memoize
    def get_expansion_candidate_drives(self):
        """Retrieve required expansion drive list.

        Note: To satisfy the expansion criteria, each item in the candidate list must be added to the specified
        group since there is a potential limitation on how many drives can be incorporated at a time.
        * Traditional raid volume groups must be added two drives maximum at a time. No limits on raid disk pools.

        :return list(candidate): list of candidate structures from the getVolumeGroupExpansionCandidates symbol endpoint
        """

        def get_expansion_candidate_drive_request():
            """Perform the request for expanding existing volume groups or disk pools.

            Note: the list of candidate structures do not necessarily produce candidates that meet all criteria.
            """
            candidates_list = None
            url = "storage-systems/%s/symbol/getVolumeGroupExpansionCandidates?verboseErrorResponse=true" % self.ssid
            if self.raid_level == "raidDiskPool":
                url = "storage-systems/%s/symbol/getDiskPoolExpansionCandidates?verboseErrorResponse=true" % self.ssid

            try:
                rc, candidates_list = self.request(url, method="POST", data=self.pool_detail["id"])
            except Exception as error:
                self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]."
                                      % (self.ssid, to_native(error)))

            return candidates_list["candidates"]

        required_candidate_list = list()
        required_additional_drives = 0
        required_additional_capacity = 0
        total_required_capacity = 0

        # determine whether and how much expansion is need to satisfy the specified criteria
        if self.criteria_min_usable_capacity:
            total_required_capacity = self.criteria_min_usable_capacity
            required_additional_capacity = self.criteria_min_usable_capacity - int(self.pool_detail["totalRaidedSpace"])

        if self.criteria_drive_count:
            required_additional_drives = self.criteria_drive_count - len(self.storage_pool_drives)

        # Determine the appropriate expansion candidate list
        if required_additional_drives > 0 or required_additional_capacity > 0:
            for candidate in get_expansion_candidate_drive_request():

                if self.criteria_drive_min_size:
                    if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["drives"])):
                        continue

                if self.raid_level == "raidDiskPool":
                    # Disk pools expand in one shot: take the first candidate large enough.
                    if (len(candidate["drives"]) >= required_additional_drives and
                            self.get_ddp_capacity(candidate["drives"]) >= total_required_capacity):
                        required_candidate_list.append(candidate)
                        break
                else:
                    # Traditional groups accumulate candidates until drive and capacity needs are met.
                    required_additional_drives -= len(candidate["drives"])
                    required_additional_capacity -= int(candidate["usableCapacity"])
                    required_candidate_list.append(candidate)

                # Determine if required drives and capacities are satisfied
                if required_additional_drives <= 0 and required_additional_capacity <= 0:
                    break
            else:
                # for/else: candidate list exhausted without satisfying the criteria.
                self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid)

        return required_candidate_list
+
+ def get_reserve_drive_count(self):
+ """Retrieve the current number of reserve drives for raidDiskPool (Only for raidDiskPool)."""
+
+ if not self.pool_detail:
+ self.module.fail_json(msg="The storage pool must exist. Array [%s]." % self.ssid)
+
+ if self.raid_level != "raidDiskPool":
+ self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]."
+ % (self.pool_detail["id"], self.ssid))
+
+ return self.pool_detail["volumeGroupData"]["diskPoolData"]["reconstructionReservedDriveCount"]
+
    def get_maximum_reserve_drive_count(self):
        """Retrieve the maximum number of reserve drives for storage pool (Only for raidDiskPool)."""
        if self.raid_level != "raidDiskPool":
            self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]."
                                  % (self.pool_detail["id"], self.ssid))

        drives_ids = list()

        # Count current member drives plus any pending expansion drives; for a new pool,
        # count the drives of the chosen creation candidate.
        # NOTE(review): this mixes drive records and drive ids in drives_ids; only the
        # length is used, so that is harmless here.
        if self.pool_detail:
            drives_ids.extend(self.storage_pool_drives)
            for candidate in self.get_expansion_candidate_drives():
                drives_ids.extend((candidate["drives"]))
        else:
            candidate = self.get_candidate_drives()
            drives_ids.extend(candidate["driveRefList"]["driveRef"])

        drive_count = len(drives_ids)
        # Reserve at most ~20% of the drives (plus one) without dropping the pool below
        # 10 data drives, and never more than 10 reserve drives in total.
        maximum_reserve_drive_count = min(int(drive_count * 0.2 + 1), drive_count - 10)
        if maximum_reserve_drive_count > 10:
            maximum_reserve_drive_count = 10

        return maximum_reserve_drive_count
+
+ def set_reserve_drive_count(self, check_mode=False):
+ """Set the reserve drive count for raidDiskPool."""
+ changed = False
+
+ if self.raid_level == "raidDiskPool" and self.reserve_drive_count:
+ maximum_count = self.get_maximum_reserve_drive_count()
+
+ if self.reserve_drive_count < 0 or self.reserve_drive_count > maximum_count:
+ self.module.fail_json(msg="Supplied reserve drive count is invalid or exceeds the maximum allowed. "
+ "Note that it may be necessary to wait for expansion operations to complete "
+ "before the adjusting the reserve drive count. Maximum [%s]. Array [%s]."
+ % (maximum_count, self.ssid))
+
+ if self.reserve_drive_count != self.get_reserve_drive_count():
+ changed = True
+
+ if not check_mode:
+ try:
+ rc, resp = self.request("storage-systems/%s/symbol/setDiskPoolReservedDriveCount" % self.ssid,
+ method="POST", data=dict(volumeGroupRef=self.pool_detail["id"],
+ newDriveCount=self.reserve_drive_count))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set reserve drive count for disk pool. Disk Pool [%s]."
+ " Array [%s]." % (self.pool_detail["id"], self.ssid))
+
+ return changed
+
+ def erase_all_available_secured_drives(self, check_mode=False):
+ """Erase all available drives that have encryption at rest feature enabled."""
+ changed = False
+ drives_list = list()
+ for drive in self.drives:
+ if drive["available"] and drive["fdeEnabled"]:
+ changed = True
+ drives_list.append(drive["id"])
+
+ if drives_list and not check_mode:
+ try:
+ rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true"
+ % self.ssid, method="POST", data=dict(driveRef=drives_list))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to erase all secured drives. Array [%s]" % self.ssid)
+
+ return changed
+
    def create_storage_pool(self):
        """Create new storage pool from the best candidate drive set.

        Uses createVolumeGroup for traditional groups and createDiskPool (with
        pool-specific defaults) for disk pools, then refreshes the cached pool detail.
        """
        url = "storage-systems/%s/symbol/createVolumeGroup?verboseErrorResponse=true" % self.ssid
        request_body = dict(label=self.name,
                            candidate=self.get_candidate_drives())

        if self.raid_level == "raidDiskPool":
            url = "storage-systems/%s/symbol/createDiskPool?verboseErrorResponse=true" % self.ssid

            # Disk pools carry additional priority/threshold defaults.
            request_body.update(
                dict(backgroundOperationPriority="useDefault",
                     criticalReconstructPriority="useDefault",
                     degradedReconstructPriority="useDefault",
                     poolUtilizationCriticalThreshold=65535,
                     poolUtilizationWarningThreshold=0))

            # Apply an explicit reconstruction reserve count when one was requested.
            if self.reserve_drive_count:
                request_body.update(dict(volumeCandidateData=dict(
                    diskPoolVolumeCandidateData=dict(reconstructionReservedDriveCount=self.reserve_drive_count))))

        try:
            rc, resp = self.request(url, method="POST", data=request_body)
        except Exception as error:
            self.module.fail_json(msg="Failed to create storage pool. Array id [%s]. Error[%s]."
                                  % (self.ssid, to_native(error)))

        # Update drive and storage pool information
        self.pool_detail = self.storage_pool
+
    def delete_storage_pool(self):
        """Delete storage pool, optionally removing its volumes and erasing secured drives."""
        # Capture FDE-enabled member drives before deletion so they can be reprovisioned afterward.
        storage_pool_drives = [drive["id"] for drive in self.storage_pool_drives if drive["fdeEnabled"]]
        try:
            delete_volumes_parameter = "?delete-volumes=true" if self.remove_volumes else ""
            rc, resp = self.request("storage-systems/%s/storage-pools/%s%s"
                                    % (self.ssid, self.pool_detail["id"], delete_volumes_parameter), method="DELETE")
        except Exception as error:
            self.module.fail_json(msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]."
                                  % (self.pool_detail["id"], self.ssid, to_native(error)))

        if storage_pool_drives and self.erase_secured_drives:
            # NOTE(review): the failure message below says "prior to creating new storage pool"
            # even though this runs after deletion -- presumably the erase readies the drives
            # for reuse; confirm the intended wording.
            try:
                rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true"
                                        % self.ssid, method="POST", data=dict(driveRef=storage_pool_drives))
            except Exception as error:
                self.module.fail_json(msg="Failed to erase drives prior to creating new storage pool. Array [%s]."
                                      " Error [%s]." % (self.ssid, to_native(error)))
+
    def secure_storage_pool(self, check_mode=False):
        """Enable security on an existing storage pool.

        Security can only be enabled, never disabled; requesting I(secure_pool=false)
        against an already-secured pool fails the module.

        :param check_mode: when True, report whether a change is needed without applying it.
        :return bool: whether pool security was (or would be) enabled.
        """
        self.pool_detail = self.storage_pool
        needs_secure_pool = False

        if not self.secure_pool and self.pool_detail["securityType"] == "enabled":
            self.module.fail_json(msg="It is not possible to disable storage pool security! See array documentation.")
        if self.secure_pool and self.pool_detail["securityType"] != "enabled":
            needs_secure_pool = True

        if needs_secure_pool and not check_mode:
            try:
                rc, resp = self.request("storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail["id"]),
                                        data=dict(securePool=True), method="POST")
            except Exception as error:
                self.module.fail_json(msg="Failed to secure storage pool. Pool id [%s]. Array [%s]. Error"
                                      " [%s]." % (self.pool_detail["id"], self.ssid, to_native(error)))

        # Refresh cached pool details to reflect the new security state.
        self.pool_detail = self.storage_pool
        return needs_secure_pool
+
+ def migrate_raid_level(self, check_mode=False):
+ """Request storage pool raid level migration."""
+ needs_migration = self.raid_level != self.pool_detail["raidLevel"]
+ if needs_migration and self.pool_detail["raidLevel"] == "raidDiskPool":
+ self.module.fail_json(msg="Raid level cannot be changed for disk pools")
+
+ if needs_migration and not check_mode:
+ sp_raid_migrate_req = dict(raidLevel=self.raid_level)
+
+ try:
+ rc, resp = self.request("storage-systems/%s/storage-pools/%s/raid-type-migration"
+ % (self.ssid, self.name), data=sp_raid_migrate_req, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to change the raid level of storage pool. Array id [%s]."
+ " Error[%s]." % (self.ssid, to_native(error)))
+
+ self.pool_detail = self.storage_pool
+ return needs_migration
+
    def expand_storage_pool(self, check_mode=False):
        """Add drives to existing storage pool.

        :param check_mode: when True, determine the required expansion without starting it.
        :return (bool, float): whether drives were required to be added to satisfy the specified
            criteria, and the last reported estimated time to completion."""
        expansion_candidate_list = self.get_expansion_candidate_drives()
        changed_required = bool(expansion_candidate_list)
        estimated_completion_time = 0.0

        # build expandable groupings of traditional raid candidate
        # (each subset is capped at expandable_drive_count drives per expansion request)
        required_expansion_candidate_list = list()
        while expansion_candidate_list:
            subset = list()
            while expansion_candidate_list and len(subset) < self.expandable_drive_count:
                subset.extend(expansion_candidate_list.pop()["drives"])
            required_expansion_candidate_list.append(subset)

        if required_expansion_candidate_list and not check_mode:
            url = "storage-systems/%s/symbol/startVolumeGroupExpansion?verboseErrorResponse=true" % self.ssid
            if self.raid_level == "raidDiskPool":
                url = "storage-systems/%s/symbol/startDiskPoolExpansion?verboseErrorResponse=true" % self.ssid

            while required_expansion_candidate_list:
                candidate_drives_list = required_expansion_candidate_list.pop()
                request_body = dict(volumeGroupRef=self.pool_detail["volumeGroupRef"],
                                    driveRef=candidate_drives_list)
                try:
                    rc, resp = self.request(url, method="POST", data=request_body)
                except Exception as error:
                    # On failure, check for in-progress pool actions to give a more helpful message.
                    rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress"
                                                    % (self.ssid, self.pool_detail["id"]), ignore_errors=True)
                    if rc == 200 and actions_resp:
                        actions = [action["currentAction"] for action in actions_resp
                                   if action["volumeRef"] in self.storage_pool_volumes]
                        self.module.fail_json(msg="Failed to add drives to the storage pool possibly because of actions"
                                              " in progress. Actions [%s]. Pool id [%s]. Array id [%s]. Error[%s]."
                                              % (", ".join(actions), self.pool_detail["id"], self.ssid,
                                                 to_native(error)))

                    self.module.fail_json(msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]."
                                          " Error[%s]." % (self.pool_detail["id"], self.ssid, to_native(error)))

                # Wait for expansion completion unless it is the last request in the candidate list
                if required_expansion_candidate_list:
                    # Poll up to EXPANSION_TIMEOUT_SEC times (one second apart while remapping).
                    for dummy in range(self.EXPANSION_TIMEOUT_SEC):
                        rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress"
                                                        % (self.ssid, self.pool_detail["id"]), ignore_errors=True)
                        if rc == 200:
                            for action in actions_resp:
                                if (action["volumeRef"] in self.storage_pool_volumes and
                                        action["currentAction"] == "remappingDce"):
                                    sleep(1)
                                    estimated_completion_time = action["estimatedTimeToCompletion"]
                                    break
                            else:
                                # for/else: no remapping action remains -- expansion step finished.
                                estimated_completion_time = 0.0
                                break

        return changed_required, estimated_completion_time
+
    def apply(self):
        """Apply requested state to storage array.

        Determines whether the storage pool must be created, deleted, or modified (expanded, raid-level
        migrated, secured, reserve-count adjusted), applies the changes unless running in check mode, and
        exits the module with a change summary message.
        """
        changed = False

        # Validate criteria options up front when a pool is requested.
        if self.state == "present":
            if self.criteria_drive_count is None and self.criteria_min_usable_capacity is None:
                self.module.fail_json(msg="One of criteria_min_usable_capacity or criteria_drive_count must be"
                                          " specified.")
            if self.criteria_drive_count and not self.is_drive_count_valid(self.criteria_drive_count):
                self.module.fail_json(msg="criteria_drive_count must be valid for the specified raid level.")

        # Snapshot the current pool state (empty dict/None-like when the pool does not exist).
        self.pool_detail = self.storage_pool
        self.module.log(pformat(self.pool_detail))

        if self.state == "present" and self.erase_secured_drives:
            self.erase_all_available_secured_drives(check_mode=True)

        # Determine whether changes need to be applied to the storage array
        if self.pool_detail:

            if self.state == "absent":
                changed = True

            elif self.state == "present":

                # Shrinking a pool and changing media type / DA-capability are unsupported modifications.
                if self.criteria_drive_count and self.criteria_drive_count < len(self.storage_pool_drives):
                    self.module.fail_json(msg="Failed to reduce the size of the storage pool. Array [%s]. Pool [%s]."
                                              % (self.ssid, self.pool_detail["id"]))

                if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail["driveMediaType"]:
                    self.module.fail_json(msg="Failed! It is not possible to modify storage pool media type."
                                              " Array [%s]. Pool [%s]." % (self.ssid, self.pool_detail["id"]))

                if (self.criteria_drive_require_da is not None and self.criteria_drive_require_da !=
                        self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"]):
                    self.module.fail_json(msg="Failed! It is not possible to modify DA-capability. Array [%s]."
                                              " Pool [%s]." % (self.ssid, self.pool_detail["id"]))

                # Evaluate current storage pool for required change.
                needs_expansion, estimated_completion_time = self.expand_storage_pool(check_mode=True)
                if needs_expansion:
                    changed = True
                if self.migrate_raid_level(check_mode=True):
                    changed = True
                if self.secure_storage_pool(check_mode=True):
                    changed = True
                if self.set_reserve_drive_count(check_mode=True):
                    changed = True

        # Pool does not exist but is requested: it must be created.
        elif self.state == "present":
            changed = True

        # Apply changes to storage array
        msg = "No changes were required for the storage pool [%s]."
        if changed and not self.module.check_mode:
            if self.state == "present":
                if self.erase_secured_drives:
                    self.erase_all_available_secured_drives()

                if self.pool_detail:
                    change_list = list()

                    # Expansion needs to occur before raid level migration to account for any sizing needs.
                    expanded, estimated_completion_time = self.expand_storage_pool()
                    if expanded:
                        change_list.append("expanded")
                    if self.migrate_raid_level():
                        change_list.append("raid migration")
                    if self.secure_storage_pool():
                        change_list.append("secured")
                    if self.set_reserve_drive_count():
                        change_list.append("adjusted reserve drive count")

                    if change_list:
                        msg = "Following changes have been applied to the storage pool [%s]: " + ", ".join(change_list)

                        if expanded:
                            msg += "\nThe expansion operation will complete in an estimated %s minutes."\
                                   % estimated_completion_time
                else:
                    self.create_storage_pool()
                    msg = "Storage pool [%s] was created."

                    # Securing / reserve adjustment may still be required immediately after creation.
                    if self.secure_storage_pool():
                        msg = "Storage pool [%s] was created and secured."
                    if self.set_reserve_drive_count():
                        msg += " Adjusted reserve drive count."

            elif self.pool_detail:
                self.delete_storage_pool()
                msg = "Storage pool [%s] removed."

        # Re-read final pool state and report; exit_json spreads the pool details into the result.
        self.pool_detail = self.storage_pool
        self.module.log(pformat(self.pool_detail))
        self.module.log(msg % self.name)
        self.module.exit_json(msg=msg % self.name, changed=changed, **self.pool_detail)
+
+
def main():
    """Entry point: instantiate the storage-pool module and apply the requested state."""
    NetAppESeriesStoragePool().apply()


if __name__ == "__main__":
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_syslog.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_syslog.py
new file mode 100644
index 00000000..76ca6e0c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_syslog.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
# Module documentation blocks consumed by ansible-doc; the option spec here must mirror the
# argument_spec declared in NetAppESeriesSyslog.__init__.
DOCUMENTATION = """
---
module: na_santricity_syslog
short_description: NetApp E-Series manage syslog settings
description:
    - Allow the syslog settings to be configured for an individual E-Series storage-system
author: Nathan Swartz (@ndswartz)
extends_documentation_fragment:
    - netapp_eseries.santricity.santricity.santricity_doc
options:
    state:
        description:
            - Add or remove the syslog server configuration for E-Series storage array.
            - Existing syslog server configuration will be removed or updated when its address matches I(address).
            - Fully qualified hostnames that resolve to an IPv4 address that matches I(address) will not be
              treated as a match.
        type: str
        choices:
            - present
            - absent
        default: present
        required: false
    address:
        description:
            - The syslog server's IPv4 address or a fully qualified hostname.
            - All existing syslog configurations will be removed when I(state=absent) and I(address=None).
        type: str
        required: false
    port:
        description:
            - This is the port the syslog server is using.
        type: int
        default: 514
        required: false
    protocol:
        description:
            - This is the transmission protocol the syslog server's using to receive syslog messages.
        type: str
        default: udp
        choices:
            - udp
            - tcp
            - tls
        required: false
    components:
        description:
            - The e-series logging components define the specific logs to transfer to the syslog server.
            - At the time of writing, 'auditLog' is the only logging component but more may become available.
        type: list
        default: ["auditLog"]
        required: false
    test:
        description:
            - This forces a test syslog message to be sent to the stated syslog server.
            - Only attempts transmission when I(state=present).
        type: bool
        default: false
        required: false
notes:
    - Check mode is supported.
    - This API is currently only supported with the Embedded Web Services API v2.12 (bundled with
      SANtricity OS 11.40.2) and higher.
"""

EXAMPLES = """
    - name: Add two syslog server configurations to NetApp E-Series storage array.
      na_santricity_syslog:
        ssid: "1"
        api_url: "https://192.168.1.100:8443/devmgr/v2"
        api_username: "admin"
        api_password: "adminpass"
        validate_certs: true
        state: present
        address: "{{ item }}"
        port: 514
        protocol: tcp
        components: ["auditLog"]
      loop:
        - "192.168.1.1"
        - "192.168.1.100"
"""

RETURN = """
msg:
    description: Success message
    returned: on success
    type: str
    sample: The settings have been updated.
syslog:
    description:
        - True if syslog server configuration has been added to e-series storage array.
    returned: on success
    sample: True
    type: bool
"""
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
class NetAppESeriesSyslog(NetAppESeriesModule):
    """Manage syslog server configurations on a NetApp E-Series storage array."""

    def __init__(self):
        ansible_options = dict(
            state=dict(choices=["present", "absent"], required=False, default="present"),
            address=dict(type="str", required=False),
            port=dict(type="int", default=514, required=False),
            protocol=dict(choices=["tcp", "tls", "udp"], default="udp", required=False),
            components=dict(type="list", required=False, default=["auditLog"]),
            # Fixed argument-spec key typo: was "require=False", which is not a valid spec key.
            test=dict(type="bool", default=False, required=False))

        required_if = [["state", "present", ["address", "port", "protocol", "components"]]]
        # NOTE(review): "absent" is not an option name, so this mutually-exclusive rule is effectively
        # a no-op; it presumably intended to forbid I(test) together with I(state=absent) -- confirm
        # before changing, since enforcing it would alter module behavior for existing playbooks.
        mutually_exclusive = [["test", "absent"]]
        super(NetAppESeriesSyslog, self).__init__(ansible_options=ansible_options,
                                                  web_services_version="02.00.0000.0000",
                                                  mutually_exclusive=mutually_exclusive,
                                                  required_if=required_if,
                                                  supports_check_mode=True)
        args = self.module.params

        # Desired state: True when the syslog server entry should exist on the array.
        self.syslog = args["state"] in ["present"]
        self.address = args["address"]
        self.port = args["port"]
        self.protocol = args["protocol"]
        self.components = args["components"]
        self.test = args["test"]
        self.ssid = args["ssid"]
        self.url = args["api_url"]
        self.creds = dict(url_password=args["api_password"],
                          validate_certs=args["validate_certs"],
                          url_username=args["api_username"], )

        # Sort so component lists can be compared independent of order.
        self.components.sort()
        self.check_mode = self.module.check_mode

        # Check whether request needs to be forwarded on to the controller web services rest api.
        self.url_path_prefix = ""
        if not self.is_embedded() and self.ssid != 0:
            self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid

    def get_configuration(self):
        """Retrieve existing syslog configuration.

        :return list: syslog server configuration entries reported by the web services api.
        """
        try:
            rc, result = self.request(self.url_path_prefix + "storage-systems/%s/syslog" % self.ssid)
            return result
        except Exception as err:
            self.module.fail_json(msg="Failed to retrieve syslog configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

    def test_configuration(self, body):
        """Send test syslog message to the storage array.

        :param dict body: must contain the "id" of the syslog server entry to test.

        NOTE(review): the original docstring mentioned a fixed number of retries, but no retry loop is
        implemented here -- a transmission failure is reported immediately.
        """
        try:
            rc, result = self.request(self.url_path_prefix + "storage-systems/%s/syslog/%s/test" % (self.ssid, body["id"]), method='POST')
        except Exception as err:
            self.module.fail_json(msg="We failed to send test message! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

    def update_configuration(self):
        """Post the syslog request to array.

        :return bool: whether any configuration change was (or would be) made.
        """
        config_match = None
        perfect_match = None
        update = False
        body = dict()

        # search existing configuration for syslog server entry match
        configs = self.get_configuration()
        if self.address:
            for config in configs:
                if config["serverAddress"] == self.address:
                    config_match = config
                    # A "perfect" match also agrees on port, protocol and the full component set.
                    if (config["port"] == self.port and config["protocol"] == self.protocol and
                            len(config["components"]) == len(self.components) and
                            all([component["type"] in self.components for component in config["components"]])):
                        perfect_match = config_match
                        break

        # generate body for the http request
        if self.syslog:
            if not perfect_match:
                update = True
                # Reuse the existing entry's id (update) when the address matched; otherwise create new.
                if config_match:
                    body.update(dict(id=config_match["id"]))
                components = [dict(type=component_type) for component_type in self.components]
                body.update(dict(serverAddress=self.address, port=self.port,
                                 protocol=self.protocol, components=components))
                self.make_configuration_request(body)

        elif config_match:

            # remove specific syslog server configuration
            if self.address:
                update = True
                body.update(dict(id=config_match["id"]))
                self.make_configuration_request(body)

        # if no address is specified, remove all syslog server configurations
        elif configs:
            update = True
            for config in configs:
                body.update(dict(id=config["id"]))
                self.make_configuration_request(body)

        return update

    def make_configuration_request(self, body):
        """Issue the create/update/delete request built by update_configuration (honors check mode)."""
        # make http request(s)
        if not self.check_mode:
            try:
                if self.syslog:
                    # POST to .../syslog/<id> updates an existing entry; POST to .../syslog creates one.
                    if "id" in body:
                        rc, result = self.request(self.url_path_prefix + "storage-systems/%s/syslog/%s" % (self.ssid, body["id"]),
                                                  method='POST', data=body)
                    else:
                        rc, result = self.request(self.url_path_prefix + "storage-systems/%s/syslog" % self.ssid, method='POST', data=body)
                        body.update(result)

                    # send syslog test message
                    if self.test:
                        self.test_configuration(body)

                elif "id" in body:
                    rc, result = self.request(self.url_path_prefix + "storage-systems/%s/syslog/%s" % (self.ssid, body["id"]), method='DELETE')

            # This is going to catch cases like a connection failure
            except Exception as err:
                self.module.fail_json(msg="We failed to modify syslog configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

    def update(self):
        """Update configuration and respond to ansible."""
        update = self.update_configuration()
        self.module.exit_json(msg="The syslog settings have been updated.", changed=update)
+
+
def main():
    """Entry point: instantiate the syslog module and apply the requested settings."""
    NetAppESeriesSyslog().update()


if __name__ == "__main__":
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_volume.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_volume.py
new file mode 100644
index 00000000..d7a37279
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_volume.py
@@ -0,0 +1,896 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
# Module documentation blocks consumed by ansible-doc; the option spec here must mirror the
# argument_spec declared in NetAppESeriesVolume.__init__.
DOCUMENTATION = """
---
module: na_santricity_volume
short_description: NetApp E-Series manage storage volumes (standard and thin)
description:
    - Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays.
author:
    - Nathan Swartz (@ndswartz)
extends_documentation_fragment:
    - netapp_eseries.santricity.santricity.santricity_doc
options:
    state:
        description:
            - Whether the specified volume should exist
        type: str
        choices: ["present", "absent"]
        default: "present"
    name:
        description:
            - The name of the volume to manage.
        type: str
        required: true
    storage_pool_name:
        description:
            - Required only when requested I(state=="present").
            - Name of the storage pool wherein the volume should reside.
        type: str
        required: false
    size_unit:
        description:
            - The unit used to interpret the size parameter
        type: str
        choices: ["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"]
        default: "gb"
    size:
        description:
            - Required only when I(state=="present").
            - Size of the volume in I(size_unit).
            - Size of the virtual volume in the case of a thin volume in I(size_unit).
            - Maximum virtual volume size of a thin provisioned volume is 256tb; however other OS-level restrictions may
              exist.
        type: float
        required: true
    segment_size_kb:
        description:
            - Segment size of the volume
            - All values are in kibibytes.
            - Some common choices include 8, 16, 32, 64, 128, 256, and 512 but options are system
              dependent.
            - Retrieve the definitive system list from M(netapp_eseries.santricity.na_santricity_facts) under segment_sizes.
            - When the storage pool is a raidDiskPool then the segment size must be 128kb.
            - Segment size migrations are not allowed in this module
        type: int
        default: 128
    thin_provision:
        description:
            - Whether the volume should be thin provisioned.
            - Thin volumes can only be created when I(raid_level=="raidDiskPool").
            - Generally, use of thin-provisioning is not recommended due to performance impacts.
        type: bool
        default: false
        required: false
    thin_volume_repo_size:
        description:
            - This value (in size_unit) sets the allocated space for the thin provisioned repository.
            - Initial value must between or equal to 4gb and 256gb in increments of 4gb.
            - During expansion operations the increase must be between or equal to 4gb and 256gb in increments of 4gb.
            - This option has no effect during expansion if I(thin_volume_expansion_policy=="automatic").
            - Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic").
        type: int
        required: false
    thin_volume_max_repo_size:
        description:
            - This is the maximum amount the thin volume repository will be allowed to grow.
            - Only has significance when I(thin_volume_expansion_policy=="automatic").
            - When the percentage I(thin_volume_repo_size) of I(thin_volume_max_repo_size) exceeds
              I(thin_volume_growth_alert_threshold) then a warning will be issued and the storage array will execute
              the I(thin_volume_expansion_policy) policy.
            - Expansion operations when I(thin_volume_expansion_policy=="automatic") will increase the maximum
              repository size.
            - Default will be the same as I(size).
        type: float
        required: false
    thin_volume_expansion_policy:
        description:
            - This is the thin volume expansion policy.
            - When I(thin_volume_expansion_policy=="automatic") and I(thin_volume_growth_alert_threshold) is exceed the
              I(thin_volume_max_repo_size) will be automatically expanded.
            - When I(thin_volume_expansion_policy=="manual") and I(thin_volume_growth_alert_threshold) is exceeded the
              storage system will wait for manual intervention.
            - The thin volume_expansion policy can not be modified on existing thin volumes in this module.
            - Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic").
        type: str
        choices: ["automatic", "manual"]
        default: "automatic"
        required: false
    thin_volume_growth_alert_threshold:
        description:
            - This is the thin provision repository utilization threshold (in percent).
            - When the percentage of used storage of the maximum repository size exceeds this value then a alert will
              be issued and the I(thin_volume_expansion_policy) will be executed.
            - Values must be between or equal to 10 and 99.
        type: int
        default: 95
        required: false
    owning_controller:
        description:
            - Specifies which controller will be the primary owner of the volume
            - Not specifying will allow the controller to choose ownership.
        type: str
        choices: ["A", "B"]
        required: false
    ssd_cache_enabled:
        description:
            - Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache defined)
            - The default value is to ignore existing SSD cache setting.
        type: bool
        default: false
        required: false
    data_assurance_enabled:
        description:
            - Determines whether data assurance (DA) should be enabled for the volume
            - Only available when creating a new volume and on a storage pool with drives supporting the DA capability.
        type: bool
        default: false
        required: false
    read_cache_enable:
        description:
            - Indicates whether read caching should be enabled for the volume.
        type: bool
        default: true
        required: false
    read_ahead_enable:
        description:
            - Indicates whether or not automatic cache read-ahead is enabled.
            - This option has no effect on thinly provisioned volumes since the architecture for thin volumes cannot
              benefit from read ahead caching.
        type: bool
        default: true
        required: false
    write_cache_enable:
        description:
            - Indicates whether write-back caching should be enabled for the volume.
        type: bool
        default: true
        required: false
    cache_without_batteries:
        description:
            - Indicates whether caching should be used without battery backup.
            - Warning, I(cache_without_batteries==true) and the storage system looses power and there is no battery backup, data will be lost!
        type: bool
        default: false
        required: false
    workload_name:
        description:
            - Label for the workload defined by the metadata.
            - When I(workload_name) and I(metadata) are specified then the defined workload will be added to the storage
              array.
            - When I(workload_name) exists on the storage array but the metadata is different then the workload
              definition will be updated. (Changes will update all associated volumes!)
            - Existing workloads can be retrieved using M(netapp_eseries.santricity.na_santricity_facts).
        type: str
        required: false
    workload_metadata:
        description:
            - Dictionary containing meta data for the use, user, location, etc of the volume (dictionary is arbitrarily
              defined for whatever the user deems useful)
            - When I(workload_name) exists on the storage array but the metadata is different then the workload
              definition will be updated. (Changes will update all associated volumes!)
            - I(workload_name) must be specified when I(metadata) are defined.
            - Dictionary key cannot be longer than 16 characters
            - Dictionary values cannot be longer than 60 characters
        type: dict
        required: false
        aliases:
            - metadata
    volume_metadata:
        description:
            - Dictionary containing metadata for the volume itself.
            - Dictionary key cannot be longer than 14 characters
            - Dictionary values cannot be longer than 240 characters
        type: dict
        required: false
    wait_for_initialization:
        description:
            - Forces the module to wait for expansion operations to complete before continuing.
        type: bool
        default: false
        required: false
"""
EXAMPLES = """
- name: Create simple volume with workload tags (volume meta data)
  na_santricity_volume:
    ssid: "1"
    api_url: "https://192.168.1.100:8443/devmgr/v2"
    api_username: "admin"
    api_password: "adminpass"
    validate_certs: true
    state: present
    name: volume
    storage_pool_name: storage_pool
    size: 300
    size_unit: gb
    workload_name: volume_tag
    metadata:
      key1: value1
      key2: value2

- name: Create a thin volume
  na_santricity_volume:
    ssid: "1"
    api_url: "https://192.168.1.100:8443/devmgr/v2"
    api_username: "admin"
    api_password: "adminpass"
    validate_certs: true
    state: present
    name: volume1
    storage_pool_name: storage_pool
    size: 131072
    size_unit: gb
    thin_provision: true
    thin_volume_repo_size: 32
    thin_volume_max_repo_size: 1024

- name: Expand thin volume's virtual size
  na_santricity_volume:
    ssid: "1"
    api_url: "https://192.168.1.100:8443/devmgr/v2"
    api_username: "admin"
    api_password: "adminpass"
    validate_certs: true
    state: present
    name: volume1
    storage_pool_name: storage_pool
    size: 262144
    size_unit: gb
    thin_provision: true
    thin_volume_repo_size: 32
    thin_volume_max_repo_size: 1024

- name: Expand thin volume's maximum repository size
  na_santricity_volume:
    ssid: "1"
    api_url: "https://192.168.1.100:8443/devmgr/v2"
    api_username: "admin"
    api_password: "adminpass"
    validate_certs: true
    state: present
    name: volume1
    storage_pool_name: storage_pool
    size: 262144
    size_unit: gb
    thin_provision: true
    thin_volume_repo_size: 32
    thin_volume_max_repo_size: 2048

- name: Delete volume
  na_santricity_volume:
    ssid: "1"
    api_url: "https://192.168.1.100:8443/devmgr/v2"
    api_username: "admin"
    api_password: "adminpass"
    validate_certs: true
    state: absent
    name: volume
"""
RETURN = """
msg:
    description: State of volume
    type: str
    returned: always
    sample: "Standard volume [workload_vol_1] has been created."
"""
+
+import time
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesVolume(NetAppESeriesModule):
+ VOLUME_CREATION_BLOCKING_TIMEOUT_SEC = 300
+ MAXIMUM_VOLUME_METADATA_KEY_LENGTH = 14
+ MAXIMUM_VOLUME_METADATA_VALUE_LENGTH = 240
+ MAXIMUM_VOLUME_METADATA_VALUE_SEGMENT_LENGTH = 60
+
+ def __init__(self):
+ ansible_options = dict(
+ state=dict(choices=["present", "absent"], default="present"),
+ name=dict(required=True, type="str"),
+ storage_pool_name=dict(type="str"),
+ size_unit=dict(default="gb", choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"], type="str"),
+ size=dict(type="float"),
+ segment_size_kb=dict(type="int", default=128, required=False),
+ owning_controller=dict(type="str", choices=["A", "B"], required=False),
+ ssd_cache_enabled=dict(type="bool", default=False),
+ data_assurance_enabled=dict(type="bool", default=False),
+ thin_provision=dict(type="bool", default=False),
+ thin_volume_repo_size=dict(type="int", required=False),
+ thin_volume_max_repo_size=dict(type="float", required=False),
+ thin_volume_expansion_policy=dict(type="str", choices=["automatic", "manual"], default="automatic", required=False),
+ thin_volume_growth_alert_threshold=dict(type="int", default=95),
+ read_cache_enable=dict(type="bool", default=True),
+ read_ahead_enable=dict(type="bool", default=True),
+ write_cache_enable=dict(type="bool", default=True),
+ cache_without_batteries=dict(type="bool", default=False),
+ workload_name=dict(type="str", required=False),
+ workload_metadata=dict(type="dict", require=False, aliases=["metadata"]),
+ volume_metadata=dict(type="dict", require=False),
+ wait_for_initialization=dict(type="bool", default=False))
+
+ required_if = [
+ ["state", "present", ["storage_pool_name", "size"]],
+ ["thin_provision", "true", ["thin_volume_repo_size"]]
+ ]
+
+ super(NetAppESeriesVolume, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True,
+ required_if=required_if)
+
+ args = self.module.params
+ self.state = args["state"]
+ self.name = args["name"]
+ self.storage_pool_name = args["storage_pool_name"]
+ self.size_unit = args["size_unit"]
+ self.segment_size_kb = args["segment_size_kb"]
+ if args["size"]:
+ self.size_b = self.convert_to_aligned_bytes(args["size"])
+
+ self.owning_controller_id = None
+ if args["owning_controller"]:
+ self.owning_controller_id = "070000000000000000000001" if args["owning_controller"] == "A" else "070000000000000000000002"
+
+ self.read_cache_enable = args["read_cache_enable"]
+ self.read_ahead_enable = args["read_ahead_enable"]
+ self.write_cache_enable = args["write_cache_enable"]
+ self.ssd_cache_enabled = args["ssd_cache_enabled"]
+ self.cache_without_batteries = args["cache_without_batteries"]
+ self.data_assurance_enabled = args["data_assurance_enabled"]
+
+ self.thin_provision = args["thin_provision"]
+ self.thin_volume_expansion_policy = args["thin_volume_expansion_policy"]
+ self.thin_volume_growth_alert_threshold = int(args["thin_volume_growth_alert_threshold"])
+ self.thin_volume_repo_size_b = None
+ self.thin_volume_max_repo_size_b = None
+
+ if args["thin_volume_repo_size"]:
+ self.thin_volume_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_repo_size"])
+ if args["thin_volume_max_repo_size"]:
+ self.thin_volume_max_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_max_repo_size"])
+
+ self.workload_name = args["workload_name"]
+ self.wait_for_initialization = args["wait_for_initialization"]
+
+ # convert metadata to a list of dictionaries containing the keys "key" and "value" corresponding to
+ # each of the workload attributes dictionary entries
+ self.metadata = []
+ if self.state == "present" and args["workload_metadata"]:
+ if not self.workload_name:
+ self.module.fail_json(msg="When metadata is specified then the name for the workload must be specified. Array [%s]." % self.ssid)
+
+ for key, value in args["workload_metadata"].items():
+ self.metadata.append({"key": key, "value": value})
+
+ self.volume_metadata = []
+ if self.state == "present" and args["volume_metadata"]:
+ for key, value in args["volume_metadata"].items():
+ key, value = str(key), str(value)
+
+ if len(key) > self.MAXIMUM_VOLUME_METADATA_KEY_LENGTH:
+ self.module.fail_json(msg="Volume metadata keys must be less than %s characters long. Array [%s]."
+ % (str(self.MAXIMUM_VOLUME_METADATA_KEY_LENGTH), self.ssid))
+
+ if len(value) > self.MAXIMUM_VOLUME_METADATA_VALUE_LENGTH:
+ self.module.fail_json(msg="Volume metadata values must be less than %s characters long. Array [%s]."
+ % (str(self.MAXIMUM_VOLUME_METADATA_VALUE_LENGTH), self.ssid))
+
+ if value:
+ for index, start in enumerate(range(0, len(value), self.MAXIMUM_VOLUME_METADATA_VALUE_SEGMENT_LENGTH)):
+ if len(value) > start + self.MAXIMUM_VOLUME_METADATA_VALUE_SEGMENT_LENGTH:
+ self.volume_metadata.append({"key": "%s~%s" % (key, str(index)), "value": value[start:start + self.MAXIMUM_VOLUME_METADATA_VALUE_SEGMENT_LENGTH]})
+ else:
+ self.volume_metadata.append({"key": "%s~%s" % (key, str(index)), "value": value[start:len(value)]})
+ else:
+ self.volume_metadata.append({"key": "%s~0" % key, "value": ""})
+
+ if self.state == "present" and self.thin_provision:
+ if not self.thin_volume_max_repo_size_b:
+ self.thin_volume_max_repo_size_b = self.size_b
+
+ if not self.thin_volume_expansion_policy:
+ self.thin_volume_expansion_policy = "automatic"
+
+ if self.size_b > 256 * 1024 ** 4:
+ self.module.fail_json(msg="Thin provisioned volumes must be less than or equal to 256tb is size."
+ " Attempted size [%sg]" % (self.size_b * 1024 ** 3))
+
+ if (self.thin_volume_repo_size_b and self.thin_volume_max_repo_size_b and
+ self.thin_volume_repo_size_b > self.thin_volume_max_repo_size_b):
+ self.module.fail_json(msg="The initial size of the thin volume must not be larger than the maximum"
+ " repository size. Array [%s]." % self.ssid)
+
+ if self.thin_volume_growth_alert_threshold < 10 or self.thin_volume_growth_alert_threshold > 99:
+ self.module.fail_json(msg="thin_volume_growth_alert_threshold must be between or equal to 10 and 99."
+ "thin_volume_growth_alert_threshold [%s]. Array [%s]."
+ % (self.thin_volume_growth_alert_threshold, self.ssid))
+
+ self.volume_detail = None
+ self.pool_detail = None
+ self.workload_id = None
+
+ def convert_to_aligned_bytes(self, size):
+ """Convert size to the truncated byte size that aligns on the segment size."""
+ size_bytes = int(size * self.SIZE_UNIT_MAP[self.size_unit])
+ segment_size_bytes = int(self.segment_size_kb * self.SIZE_UNIT_MAP["kb"])
+ segment_count = int(size_bytes / segment_size_bytes)
+ return segment_count * segment_size_bytes
+
+ def get_volume(self):
+ """Retrieve volume details from storage array."""
+ volumes = list()
+ thin_volumes = list()
+ try:
+ rc, volumes = self.request("storage-systems/%s/volumes" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to obtain list of thick volumes. Array Id [%s]. Error[%s]."
+ % (self.ssid, to_native(err)))
+ try:
+ rc, thin_volumes = self.request("storage-systems/%s/thin-volumes" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to obtain list of thin volumes. Array Id [%s]. Error[%s]."
+ % (self.ssid, to_native(err)))
+
+ volume_detail = [volume for volume in volumes + thin_volumes if volume["name"] == self.name]
+ return volume_detail[0] if volume_detail else dict()
+
+ def wait_for_volume_availability(self, retries=VOLUME_CREATION_BLOCKING_TIMEOUT_SEC / 5):
+ """Waits until volume becomes available.
+
+ :raises AnsibleFailJson when retries are exhausted.
+ """
+ if retries == 0:
+ self.module.fail_json(msg="Timed out waiting for the volume %s to become available. Array [%s]."
+ % (self.name, self.ssid))
+ if not self.get_volume():
+ time.sleep(5)
+ self.wait_for_volume_availability(retries=retries - 1)
+
    def wait_for_volume_action(self, timeout=None):
        """Wait until any long-lived action (e.g. initialization, expansion) on this volume completes.

        Polls the symbol getLongLivedOpsProgress endpoint every five seconds until no in-progress
        operation references this volume (by "id" or "storageVolumeRef").

        :param int timeout: Wait duration measured in seconds. Waits indefinitely when None.
        """
        action = "unknown"
        percent_complete = None
        while action != "complete":
            time.sleep(5)

            try:
                rc, operations = self.request("storage-systems/%s/symbol/getLongLivedOpsProgress" % self.ssid)

                # Search long lived operations for volume
                # Assume complete unless an operation entry still references this volume.
                action = "complete"
                for operation in operations["longLivedOpsProgress"]:
                    if operation["volAction"] is not None:
                        for key in operation.keys():
                            if (operation[key] is not None and "volumeRef" in operation[key] and
                                    (operation[key]["volumeRef"] == self.volume_detail["id"] or
                                     ("storageVolumeRef" in self.volume_detail and operation[key]["volumeRef"] == self.volume_detail["storageVolumeRef"]))):
                                action = operation["volAction"]
                                percent_complete = operation["init"]["percentComplete"]
            except Exception as err:
                self.module.fail_json(msg="Failed to get volume expansion progress. Volume [%s]. Array Id [%s]."
                                          " Error[%s]." % (self.name, self.ssid, to_native(err)))

            # Enforce the optional timeout: warn with the last known progress, then fail.
            if timeout is not None:
                if timeout <= 0:
                    self.module.warn("Expansion action, %s, failed to complete during the allotted time. Time remaining"
                                     " [%s]. Array Id [%s]." % (action, percent_complete, self.ssid))
                    self.module.fail_json(msg="Expansion action failed to complete. Time remaining [%s]. Array Id [%s]." % (percent_complete, self.ssid))
                if timeout:
                    timeout -= 5

            self.module.log("Expansion action, %s, is %s complete." % (action, percent_complete))
        self.module.log("Expansion action is complete.")
+
+ def get_storage_pool(self):
+ """Retrieve storage pool details from the storage array."""
+ storage_pools = list()
+ try:
+ rc, storage_pools = self.request("storage-systems/%s/storage-pools" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]."
+ % (self.ssid, to_native(err)))
+
+ pool_detail = [storage_pool for storage_pool in storage_pools if storage_pool["name"] == self.storage_pool_name]
+ return pool_detail[0] if pool_detail else dict()
+
+ def check_storage_pool_sufficiency(self):
+ """Perform a series of checks as to the sufficiency of the storage pool for the volume."""
+ if not self.pool_detail:
+ self.module.fail_json(msg='Requested storage pool (%s) not found' % self.storage_pool_name)
+
+ if not self.volume_detail:
+ if self.thin_provision and not self.pool_detail['diskPool']:
+ self.module.fail_json(msg='Thin provisioned volumes can only be created on raid disk pools.')
+
+ if (self.data_assurance_enabled and not
+ (self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"] and
+ self.pool_detail["protectionInformationCapabilities"]["protectionType"] == "type2Protection")):
+ self.module.fail_json(msg="Data Assurance (DA) requires the storage pool to be DA-compatible."
+ " Array [%s]." % self.ssid)
+
+ if int(self.pool_detail["freeSpace"]) < self.size_b and not self.thin_provision:
+ self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs."
+ " Array [%s]." % self.ssid)
+ else:
+ # Check for expansion
+ if (int(self.pool_detail["freeSpace"]) < int(self.volume_detail["totalSizeInBytes"]) - self.size_b and
+ not self.thin_provision):
+ self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs."
+ " Array [%s]." % self.ssid)
+
    def update_workload_tags(self, check_mode=False):
        """Check the status of the workload tag and update storage array definitions if necessary.

        When the workload attributes are not provided but an existing workload tag name is, then the attributes will be
        used.

        :param bool check_mode: when True only report whether a change is required; apply nothing.
        :return bool: Whether changes were required to be made."""
        change_required = False
        workload_tags = None
        request_body = None
        ansible_profile_id = None

        if self.workload_name:
            try:
                rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid)
            except Exception as error:
                self.module.fail_json(msg="Failed to retrieve storage array workload tags. Array [%s]" % self.ssid)

            # Fixed profile id applied to every Ansible-managed workload tag.
            ansible_profile_id = "Other_1"
            request_body = dict(name=self.workload_name,
                                profileId=ansible_profile_id,
                                workloadInstanceIndex=None,
                                isValid=True)

            # evaluate and update storage array when needed
            for tag in workload_tags:
                if tag["name"] == self.workload_name:
                    self.workload_id = tag["id"]

                    # No attributes supplied: reuse the existing tag untouched.
                    if not self.metadata:
                        break

                    # Determine if core attributes (everything but profileId) is the same
                    metadata_set = set(tuple(sorted(attr.items())) for attr in self.metadata)
                    tag_set = set(tuple(sorted(attr.items()))
                                  for attr in tag["workloadAttributes"] if attr["key"] != "profileId")
                    if metadata_set != tag_set:
                        self.module.log("Workload tag change is required!")
                        change_required = True

                    # only perform the required action when check_mode==False
                    if change_required and not check_mode:
                        self.metadata.append(dict(key="profileId", value=ansible_profile_id))
                        request_body.update(dict(isNewWorkloadInstance=False,
                                                 isWorkloadDataInitialized=True,
                                                 isWorkloadCardDataToBeReset=True,
                                                 workloadAttributes=self.metadata))
                        try:
                            rc, resp = self.request("storage-systems/%s/workloads/%s" % (self.ssid, tag["id"]),
                                                    data=request_body, method="POST")
                        except Exception as error:
                            self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]"
                                                      % (self.ssid, to_native(error)))
                        self.module.log("Workload tag [%s] required change." % self.workload_name)
                    break

            # existing workload tag not found so create new workload tag
            # (for/else: runs only when the loop above never hit "break")
            else:
                change_required = True
                self.module.log("Workload tag creation is required!")

                if change_required and not check_mode:
                    if self.metadata:
                        self.metadata.append(dict(key="profileId", value=ansible_profile_id))
                    else:
                        self.metadata = [dict(key="profileId", value=ansible_profile_id)]

                    request_body.update(dict(isNewWorkloadInstance=True,
                                             isWorkloadDataInitialized=False,
                                             isWorkloadCardDataToBeReset=False,
                                             workloadAttributes=self.metadata))
                    try:
                        rc, resp = self.request("storage-systems/%s/workloads" % self.ssid,
                                                method="POST", data=request_body)
                        self.workload_id = resp["id"]
                    except Exception as error:
                        self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]"
                                                  % (self.ssid, to_native(error)))
                    self.module.log("Workload tag [%s] was added." % self.workload_name)

        return change_required
+
    def get_volume_property_changes(self):
        """Retrieve the volume update request body when change(s) are required.

        :raise AnsibleFailJson when attempting to change segment size on existing volume.
        :return dict: request body when change(s) to a volume's properties are required;
            empty dict when the volume already matches the requested properties.
        """
        change = False
        request_body = dict(flashCache=self.ssd_cache_enabled, metaTags=[],
                            cacheSettings=dict(readCacheEnable=self.read_cache_enable,
                                               writeCacheEnable=self.write_cache_enable))

        # check for invalid modifications -- segment size is fixed at creation time,
        # so any mismatch with the existing volume is an error rather than a change.
        if self.segment_size_kb * 1024 != int(self.volume_detail["segmentSize"]):
            self.module.fail_json(msg="Existing volume segment size is %s and cannot be modified."
                                      % self.volume_detail["segmentSize"])

        # common thick/thin volume properties
        if (self.read_cache_enable != self.volume_detail["cacheSettings"]["readCacheEnable"] or
                self.write_cache_enable != self.volume_detail["cacheSettings"]["writeCacheEnable"] or
                self.ssd_cache_enabled != self.volume_detail["flashCached"]):
            change = True

        # controller ownership
        if self.owning_controller_id and self.owning_controller_id != self.volume_detail["preferredManager"]:
            change = True
            request_body.update(dict(owningControllerId=self.owning_controller_id))

        # volume meta tags: the request always carries the full desired tag set;
        # any tag missing from the existing volume triggers an update.
        request_body["metaTags"].extend(self.volume_metadata)
        for entry in self.volume_metadata:
            if entry not in self.volume_detail["metadata"]:
                change = True

        if self.workload_name:
            request_body["metaTags"].extend([{"key": "workloadId", "value": self.workload_id},
                                             {"key": "volumeTypeId", "value": "volume"}])

            if ({"key": "workloadId", "value": self.workload_id} not in self.volume_detail["metadata"] or
                    {"key": "volumeTypeId", "value": "volume"} not in self.volume_detail["metadata"]):
                change = True

        # A length mismatch means the volume carries stale tags that must be replaced.
        if len(self.volume_detail["metadata"]) != len(request_body["metaTags"]):
            change = True

        # thick/thin volume specific properties
        if self.thin_provision:
            if self.thin_volume_growth_alert_threshold != int(self.volume_detail["growthAlertThreshold"]):
                change = True
                request_body.update(dict(growthAlertThreshold=self.thin_volume_growth_alert_threshold))
            if self.thin_volume_expansion_policy != self.volume_detail["expansionPolicy"]:
                change = True
                request_body.update(dict(expansionPolicy=self.thin_volume_expansion_policy))
        else:
            # readAheadMultiplier > 0 is how the array encodes "read-ahead enabled".
            if self.read_ahead_enable != (int(self.volume_detail["cacheSettings"]["readAheadMultiplier"]) > 0):
                change = True
                request_body["cacheSettings"].update(dict(readAheadEnable=self.read_ahead_enable))
            if self.cache_without_batteries != self.volume_detail["cacheSettings"]["cwob"]:
                change = True
                request_body["cacheSettings"].update(dict(cacheWithoutBatteries=self.cache_without_batteries))

        return request_body if change else dict()
+
+ def get_expand_volume_changes(self):
+ """Expand the storage specifications for the existing thick/thin volume.
+
+ :raise AnsibleFailJson when a thick/thin volume expansion request fails.
+ :return dict: dictionary containing all the necessary values for volume expansion request
+ """
+ request_body = dict()
+
+ if self.size_b < int(self.volume_detail["capacity"]):
+ self.module.fail_json(msg="Reducing the size of volumes is not permitted. Volume [%s]. Array [%s]"
+ % (self.name, self.ssid))
+
+ if self.volume_detail["thinProvisioned"]:
+ if self.size_b > int(self.volume_detail["capacity"]):
+ request_body.update(dict(sizeUnit="bytes", newVirtualSize=self.size_b))
+ self.module.log("Thin volume virtual size have been expanded.")
+
+ if self.volume_detail["expansionPolicy"] == "automatic":
+ if self.thin_volume_max_repo_size_b > int(self.volume_detail["provisionedCapacityQuota"]):
+ request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_max_repo_size_b))
+ self.module.log("Thin volume maximum repository size have been expanded (automatic policy).")
+
+ elif self.volume_detail["expansionPolicy"] == "manual":
+ if self.thin_volume_repo_size_b > int(self.volume_detail["currentProvisionedCapacity"]):
+ change = self.thin_volume_repo_size_b - int(self.volume_detail["currentProvisionedCapacity"])
+ if change < 4 * 1024 ** 3 or change > 256 * 1024 ** 3 or change % (4 * 1024 ** 3) != 0:
+ self.module.fail_json(msg="The thin volume repository increase must be between or equal to 4gb"
+ " and 256gb in increments of 4gb. Attempted size [%sg]."
+ % (self.thin_volume_repo_size_b * 1024 ** 3))
+
+ request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_repo_size_b))
+ self.module.log("Thin volume maximum repository size have been expanded (manual policy).")
+
+ elif self.size_b > int(self.volume_detail["capacity"]):
+ request_body.update(dict(sizeUnit="bytes", expansionSize=self.size_b))
+ self.module.log("Volume storage capacities have been expanded.")
+
+ return request_body
+
+ def create_volume(self):
+ """Create thick/thin volume according to the specified criteria."""
+ body = dict(name=self.name, poolId=self.pool_detail["id"], sizeUnit="bytes",
+ dataAssuranceEnabled=self.data_assurance_enabled)
+
+ if self.volume_metadata:
+ body.update({"metaTags": self.volume_metadata})
+
+ if self.thin_provision:
+ body.update(dict(virtualSize=self.size_b,
+ repositorySize=self.thin_volume_repo_size_b,
+ maximumRepositorySize=self.thin_volume_max_repo_size_b,
+ expansionPolicy=self.thin_volume_expansion_policy,
+ growthAlertThreshold=self.thin_volume_growth_alert_threshold))
+ try:
+ rc, volume = self.request("storage-systems/%s/thin-volumes" % self.ssid, data=body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(error)))
+
+ self.module.log("New thin volume created [%s]." % self.name)
+
+ else:
+ body.update(dict(size=self.size_b, segSize=self.segment_size_kb))
+ try:
+ rc, volume = self.request("storage-systems/%s/volumes" % self.ssid, data=body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(error)))
+
+ self.module.log("New volume created [%s]." % self.name)
+
+ def update_volume_properties(self):
+ """Update existing thin-volume or volume properties.
+
+ :raise AnsibleFailJson when either thick/thin volume update request fails.
+ :return bool: whether update was applied
+ """
+ self.wait_for_volume_availability()
+ self.volume_detail = self.get_volume()
+
+ request_body = self.get_volume_property_changes()
+
+ if request_body:
+ if self.thin_provision:
+ try:
+ rc, resp = self.request("storage-systems/%s/thin-volumes/%s"
+ % (self.ssid, self.volume_detail["id"]), data=request_body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update thin volume properties. Volume [%s]. Array Id [%s]."
+ " Error[%s]." % (self.name, self.ssid, to_native(error)))
+ else:
+ try:
+ rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]),
+ data=request_body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update volume properties. Volume [%s]. Array Id [%s]."
+ " Error[%s]." % (self.name, self.ssid, to_native(error)))
+ return True
+ return False
+
+ def expand_volume(self):
+ """Expand the storage specifications for the existing thick/thin volume.
+
+ :raise AnsibleFailJson when a thick/thin volume expansion request fails.
+ """
+ request_body = self.get_expand_volume_changes()
+ if request_body:
+ if self.volume_detail["thinProvisioned"]:
+ try:
+ rc, resp = self.request("storage-systems/%s/thin-volumes/%s/expand"
+ % (self.ssid, self.volume_detail["id"]), data=request_body, method="POST")
+ except Exception as err:
+ self.module.fail_json(msg="Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(err)))
+ self.module.log("Thin volume specifications have been expanded.")
+
+ else:
+ try:
+ rc, resp = self.request(
+ "storage-systems/%s/volumes/%s/expand" % (self.ssid, self.volume_detail['id']),
+ data=request_body, method="POST")
+ except Exception as err:
+ self.module.fail_json(msg="Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(err)))
+
+ self.module.log("Volume storage capacities have been expanded.")
+
+ def delete_volume(self):
+ """Delete existing thin/thick volume."""
+ if self.thin_provision:
+ try:
+ rc, resp = self.request("storage-systems/%s/thin-volumes/%s" % (self.ssid, self.volume_detail["id"]),
+ method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to delete thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(error)))
+ self.module.log("Thin volume deleted [%s]." % self.name)
+ else:
+ try:
+ rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]),
+ method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(error)))
+ self.module.log("Volume deleted [%s]." % self.name)
+
+ def apply(self):
+ """Determine and apply any changes necessary to satisfy the specified criteria.
+
+ :raise AnsibleExitJson when completes successfully"""
+ change = False
+ msg = None
+
+ self.volume_detail = self.get_volume()
+ self.pool_detail = self.get_storage_pool()
+
+ # Determine whether changes need to be applied to existing workload tags
+ if self.state == 'present' and self.update_workload_tags(check_mode=True):
+ change = True
+
+ # Determine if any changes need to be applied
+ if self.volume_detail:
+ if self.state == 'absent':
+ change = True
+
+ elif self.state == 'present':
+ if self.get_expand_volume_changes() or self.get_volume_property_changes():
+ change = True
+
+ elif self.state == 'present':
+ if self.thin_provision and (self.thin_volume_repo_size_b < 4 * 1024 ** 3 or
+ self.thin_volume_repo_size_b > 256 * 1024 ** 3 or
+ self.thin_volume_repo_size_b % (4 * 1024 ** 3) != 0):
+ self.module.fail_json(msg="The initial thin volume repository size must be between 4gb and 256gb in"
+ " increments of 4gb. Attempted size [%sg]."
+ % (self.thin_volume_repo_size_b * 1024 ** 3))
+ change = True
+
+ self.module.log("Update required: [%s]." % change)
+
+ # Apply any necessary changes
+ if change and not self.module.check_mode:
+ if self.state == 'present':
+ if self.update_workload_tags():
+ msg = "Workload tag change occurred."
+
+ if not self.volume_detail:
+ self.check_storage_pool_sufficiency()
+ self.create_volume()
+ self.update_volume_properties()
+ msg = msg[:-1] + " and volume [%s] was created." if msg else "Volume [%s] has been created."
+ else:
+ if self.update_volume_properties():
+ msg = "Volume [%s] properties were updated."
+
+ if self.get_expand_volume_changes():
+ self.expand_volume()
+ msg = msg[:-1] + " and was expanded." if msg else "Volume [%s] was expanded."
+
+ if self.wait_for_initialization:
+ self.module.log("Waiting for volume operation to complete.")
+ self.wait_for_volume_action()
+
+ elif self.state == 'absent':
+ self.delete_volume()
+ msg = "Volume [%s] has been deleted."
+
+ else:
+ msg = "Volume [%s] does not exist." if self.state == 'absent' else "Volume [%s] exists."
+
+ self.module.exit_json(msg=(msg % self.name if msg and "%s" in msg else msg), changed=change)
+
+
def main():
    """Module entry point: construct the volume handler and apply the requested state."""
    NetAppESeriesVolume().apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_alerts.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_alerts.py
new file mode 100644
index 00000000..cea2107f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_alerts.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_alerts
+short_description: NetApp E-Series manage email notification settings
+description:
+ - Certain E-Series systems have the capability to send email notifications on potentially critical events.
+ - This module will allow the owner of the system to specify email recipients for these messages.
+version_added: '2.7'
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ description:
+ - Enable/disable the sending of email-based alerts.
+ default: enabled
+ required: false
+ type: str
+ choices:
+ - enabled
+ - disabled
+ server:
+ description:
+ - A fully qualified domain name, IPv4 address, or IPv6 address of a mail server.
+ - To use a fully qualified domain name, you must configure a DNS server on both controllers using
+ M(netapp_eseries.santricity.netapp_e_mgmt_interface).
+ - Required when I(state=enabled).
+ type: str
+ required: no
+ sender:
+ description:
+ - This is the sender that the recipient will see. It doesn't necessarily need to be a valid email account.
+ - Required when I(state=enabled).
+ type: str
+ required: no
+ contact:
+ description:
+ - Allows the owner to specify some free-form contact information to be included in the emails.
+ - This is typically utilized to provide a contact phone number.
+ type: str
+ required: no
+ recipients:
+ description:
+ - The email addresses that will receive the email notifications.
+ - Required when I(state=enabled).
+ type: list
+ required: no
+ test:
+ description:
+ - When a change is detected in the configuration, a test email will be sent.
+ - This may take a few minutes to process.
+ - Only applicable if I(state=enabled).
+ default: no
+ type: bool
+ log_path:
+ description:
+ - Path to a file on the Ansible control node to be used for debug logging
+ type: str
+ required: no
+notes:
+ - Check mode is supported.
+ - Alertable messages are a subset of messages shown by the Major Event Log (MEL), of the storage-system. Examples
+ of alertable messages include drive failures, failed controllers, loss of redundancy, and other warning/critical
+ events.
+ - This API is currently only supported with the Embedded Web Services API v2.0 and higher.
+"""
+
+EXAMPLES = """
+ - name: Enable email-based alerting
+ netapp_e_alerts:
+ state: enabled
+ sender: noreply@example.com
+ server: mail@example.com
+ contact: "Phone: 1-555-555-5555"
+ recipients:
+ - name1@example.com
+ - name2@example.com
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+
+ - name: Disable alerting
+ netapp_e_alerts:
+ state: disabled
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+"""
+
+import json
+import logging
+from pprint import pformat
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
class Alerts(object):
    """Manage the E-Series email-based alerting (device-alerts) configuration."""

    def __init__(self):
        """Build the module argument spec, validate inputs and cache connection settings."""
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(dict(
            state=dict(type='str', required=False, default='enabled',
                       choices=['enabled', 'disabled']),
            server=dict(type='str', required=False, ),
            sender=dict(type='str', required=False, ),
            contact=dict(type='str', required=False, ),
            recipients=dict(type='list', required=False, ),
            test=dict(type='bool', required=False, default=False, ),
            log_path=dict(type='str', required=False),
        ))

        # Mail server, sender and recipients only make sense when alerting is enabled.
        required_if = [
            ['state', 'enabled', ['server', 'sender', 'recipients']]
        ]

        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
        args = self.module.params
        self.alerts = args['state'] == 'enabled'
        self.server = args['server']
        self.sender = args['sender']
        self.contact = args['contact']
        self.recipients = args['recipients']
        self.test = args['test']

        self.ssid = args['ssid']
        self.url = args['api_url']
        self.creds = dict(url_password=args['api_password'],
                          validate_certs=args['validate_certs'],
                          url_username=args['api_username'], )

        self.check_mode = self.module.check_mode

        log_path = args['log_path']

        # logging setup
        self._logger = logging.getLogger(self.__class__.__name__)

        if log_path:
            logging.basicConfig(
                level=logging.DEBUG, filename=log_path, filemode='w',
                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')

        if not self.url.endswith('/'):
            self.url += '/'

        # Very basic validation on email addresses: xx@yy.zz
        email = re.compile(r"[^@]+@[^@]+\.[^@]+")

        if self.sender and not email.match(self.sender):
            self.module.fail_json(msg="The sender (%s) provided is not a valid email address." % self.sender)

        if self.recipients is not None:
            for recipient in self.recipients:
                if not email.match(recipient):
                    self.module.fail_json(msg="The recipient (%s) provided is not a valid email address." % recipient)

            if len(self.recipients) < 1:
                self.module.fail_json(msg="At least one recipient address must be specified.")

    def get_configuration(self):
        """Fetch and return the current device-alerts configuration from the array."""
        try:
            (rc, result) = request(self.url + 'storage-systems/%s/device-alerts' % self.ssid, headers=HEADERS,
                                   **self.creds)
            self._logger.info("Current config: %s", pformat(result))
            return result

        except Exception as err:
            self.module.fail_json(msg="Failed to retrieve the alerts configuration! Array Id [%s]. Error [%s]."
                                      % (self.ssid, to_native(err)))

    def update_configuration(self):
        """Compare the desired alert settings to the current ones and push an update when needed.

        :return bool: whether an update was required.
        """
        config = self.get_configuration()
        update = False
        body = dict()

        if self.alerts:
            body = dict(alertingEnabled=True)
            if not config['alertingEnabled']:
                update = True

            body.update(emailServerAddress=self.server)
            if config['emailServerAddress'] != self.server:
                update = True

            body.update(additionalContactInformation=self.contact, sendAdditionalContactInformation=True)
            if self.contact and (self.contact != config['additionalContactInformation']
                                 or not config['sendAdditionalContactInformation']):
                update = True

            body.update(emailSenderAddress=self.sender)
            if config['emailSenderAddress'] != self.sender:
                update = True

            # Compare recipient lists order-insensitively by sorting both sides.
            self.recipients.sort()
            if config['recipientEmailAddresses']:
                config['recipientEmailAddresses'].sort()

            body.update(recipientEmailAddresses=self.recipients)
            if config['recipientEmailAddresses'] != self.recipients:
                update = True

        elif config['alertingEnabled']:
            body = dict(alertingEnabled=False)
            update = True

        self._logger.debug(pformat(body))

        if update and not self.check_mode:
            try:
                (rc, result) = request(self.url + 'storage-systems/%s/device-alerts' % self.ssid, method='POST',
                                       data=json.dumps(body), headers=HEADERS, **self.creds)
            # This is going to catch cases like a connection failure
            except Exception as err:
                # BUGFIX: the failure message previously read "failed to set the
                # storage-system name" (copied from another module); this request
                # updates the alerts configuration.
                self.module.fail_json(msg="We failed to update the alerts configuration! Array Id [%s]. Error [%s]."
                                          % (self.ssid, to_native(err)))
        return update

    def send_test_email(self):
        """Send a test email to verify that the provided configuration is valid and functional."""
        if not self.check_mode:
            try:
                (rc, result) = request(self.url + 'storage-systems/%s/device-alerts/alert-email-test' % self.ssid,
                                       timeout=300, method='POST', headers=HEADERS, **self.creds)

                if result['response'] != 'emailSentOK':
                    self.module.fail_json(msg="The test email failed with status=[%s]! Array Id [%s]."
                                              % (result['response'], self.ssid))

            # This is going to catch cases like a connection failure
            except Exception as err:
                self.module.fail_json(msg="We failed to send the test email! Array Id [%s]. Error [%s]."
                                          % (self.ssid, to_native(err)))

    def update(self):
        """Apply the configuration, optionally send a test email, and exit the module."""
        update = self.update_configuration()

        if self.test and update:
            self._logger.info("An update was detected and test=True, running a test.")
            self.send_test_email()

        if self.alerts:
            msg = 'Alerting has been enabled using server=%s, sender=%s.' % (self.server, self.sender)
        else:
            msg = 'Alerting has been disabled.'

        self.module.exit_json(msg=msg, changed=update, )

    def __call__(self, *args, **kwargs):
        """Allow the instance to be invoked directly as the module entry point."""
        self.update()
+
+
def main():
    """Module entry point: build the Alerts handler and run it."""
    handler = Alerts()
    handler()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg.py
new file mode 100644
index 00000000..e2bfa419
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = """
+---
+module: netapp_e_amg
+short_description: NetApp E-Series create, remove, and update asynchronous mirror groups
+description:
+ - Allows for the creation, removal and updating of Asynchronous Mirror Groups for NetApp E-series storage arrays
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ name:
+ description:
+ - The name of the async array you wish to target, or create.
+ - If C(state) is present and the name isn't found, it will attempt to create.
+ type: str
+ required: yes
+ new_name:
+ description:
+ - New async array name
+ type: str
+ required: no
+ secondaryArrayId:
+ description:
+ - The ID of the secondary array to be used in mirroring process
+ type: str
+ required: yes
+ syncIntervalMinutes:
+ description:
+ - The synchronization interval in minutes
+ type: int
+ default: 10
+ manualSync:
+ description:
+ - Setting this to true will cause other synchronization values to be ignored
+ type: bool
+ default: 'no'
+ recoveryWarnThresholdMinutes:
+ description:
+ - Recovery point warning threshold (minutes). The user will be warned when the age of the last good failures point exceeds this value
+ type: int
+ default: 20
+ repoUtilizationWarnThreshold:
+ description:
+ - Recovery point warning threshold
+ type: int
+ default: 80
+ interfaceType:
+ description:
+ - The intended protocol to use if both Fibre and iSCSI are available.
+ type: str
+ choices:
+ - iscsi
+ - fibre
+ syncWarnThresholdMinutes:
+ description:
+ - The threshold (in minutes) for notifying the user that periodic synchronization has taken too long to complete.
+ default: 10
+ type: int
+ state:
+ description:
+ - A C(state) of present will either create or update the async mirror group.
+ - A C(state) of absent will remove the async mirror group.
+ type: str
+ choices: [ absent, present ]
+ required: yes
+"""
+
+EXAMPLES = """
+ - name: AMG removal
+ na_eseries_amg:
+ state: absent
+ ssid: "{{ ssid }}"
+ secondaryArrayId: "{{amg_secondaryArrayId}}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ new_name: "{{amg_array_name}}"
+ name: "{{amg_name}}"
+ when: amg_create
+
+ - name: AMG create
+ netapp_e_amg:
+ state: present
+ ssid: "{{ ssid }}"
+ secondaryArrayId: "{{amg_secondaryArrayId}}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ new_name: "{{amg_array_name}}"
+ name: "{{amg_name}}"
+ when: amg_create
+"""
+
+RETURN = """
+msg:
+ description: Successful creation
+ returned: success
+ type: str
+ sample: '{"changed": true, "connectionType": "fc", "groupRef": "3700000060080E5000299C24000006E857AC7EEC", "groupState": "optimal", "id": "3700000060080E5000299C24000006E857AC7EEC", "label": "amg_made_by_ansible", "localRole": "primary", "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", "orphanGroup": false, "recoveryPointAgeAlertThresholdMinutes": 20, "remoteRole": "secondary", "remoteTarget": {"nodeName": {"ioInterfaceType": "fc", "iscsiNodeName": null, "remoteNodeWWN": "20040080E5299F1C"}, "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", "scsiinitiatorTargetBaseProperties": {"ioInterfaceType": "fc", "iscsiinitiatorTargetBaseParameters": null}}, "remoteTargetId": "ansible2", "remoteTargetName": "Ansible2", "remoteTargetWwn": "60080E5000299F880000000056A25D56", "repositoryUtilizationWarnThreshold": 80, "roleChangeProgress": "none", "syncActivity": "idle", "syncCompletionTimeAlertThresholdMinutes": 10, "syncIntervalMinutes": 10, "worldWideName": "60080E5000299C24000006E857AC7EEC"}'
+""" # NOQA
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
def has_match(module, ssid, api_url, api_pwd, api_usr, body):
    """Search the array for an async mirror group matching the desired name and spec.

    :return tuple: (label_exists, matches_spec, api_data, async_id)
    """
    compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
                    'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
    desired_state = dict((x, (body.get(x))) for x in compare_keys)
    label_exists = False
    matches_spec = False
    current_state = None
    async_id = None
    api_data = None
    desired_name = body.get('name')
    endpoint = 'storage-systems/%s/async-mirrors' % ssid
    url = api_url + endpoint
    try:
        rc, data = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS)
    except Exception as e:
        # BUGFIX: report the request failure as a module failure. The original
        # used exit_json here, which signalled success to Ansible despite the error.
        module.fail_json(msg="Error finding a match. Message: %s" % to_native(e), exception=traceback.format_exc())

    for async_group in data:
        if async_group['label'] == desired_name:
            label_exists = True
            api_data = async_group
            async_id = async_group['groupRef']
            # Map the API's field names onto the module's comparison keys.
            current_state = dict(
                syncIntervalMinutes=async_group['syncIntervalMinutes'],
                syncWarnThresholdMinutes=async_group['syncCompletionTimeAlertThresholdMinutes'],
                recoveryWarnThresholdMinutes=async_group['recoveryPointAgeAlertThresholdMinutes'],
                repoUtilizationWarnThreshold=async_group['repositoryUtilizationWarnThreshold'],
            )

    if current_state == desired_state:
        matches_spec = True

    return label_exists, matches_spec, api_data, async_id
+
+
def create_async(module, ssid, api_url, api_pwd, api_usr, body):
    """Create a new async mirror group and return the API's representation of it.

    Note the parameter order: password precedes username (kept for
    backward compatibility with existing call sites).
    """
    endpoint = 'storage-systems/%s/async-mirrors' % ssid
    url = api_url + endpoint
    post_data = json.dumps(body)
    try:
        rc, data = request(url, data=post_data, method='POST', url_username=api_usr, url_password=api_pwd,
                           headers=HEADERS)
    except Exception as e:
        # Bug fix: report creation failures via fail_json (the original used
        # exit_json, reporting success); also fixes the "aysnc" typo.
        module.fail_json(msg="Exception while creating async mirror group. Message: %s" % to_native(e),
                         exception=traceback.format_exc())
    return data
+
+
def update_async(module, ssid, api_url, pwd, user, body, new_name, async_id):
    """Update an existing async mirror group's thresholds (and optionally rename it).

    Only the four comparable settings (plus 'new_name' when given) are sent;
    other keys in *body* are ignored.
    """
    endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
    url = api_url + endpoint
    compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
                    'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
    desired_state = dict((x, (body.get(x))) for x in compare_keys)

    if new_name:
        desired_state['new_name'] = new_name

    post_data = json.dumps(desired_state)

    try:
        rc, data = request(url, data=post_data, method='POST', headers=HEADERS,
                           url_username=user, url_password=pwd)
    except Exception as e:
        # Bug fix: report the failure via fail_json; the original used
        # exit_json, which masked the error as success.
        module.fail_json(msg="Exception while updating async mirror group. Message: %s" % to_native(e),
                         exception=traceback.format_exc())

    return data
+
+
def remove_amg(module, ssid, api_url, pwd, user, async_id):
    """Delete the async mirror group identified by *async_id*. Returns None."""
    endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
    url = api_url + endpoint
    try:
        rc, data = request(url, method='DELETE', url_username=user, url_password=pwd,
                           headers=HEADERS)
    except Exception as e:
        # Bug fix: report the failure via fail_json; the original used
        # exit_json, which masked the error as success.
        module.fail_json(msg="Exception while removing async mirror group. Message: %s" % to_native(e),
                         exception=traceback.format_exc())

    return
+
+
def main():
    """Module entry point: create, update, or remove an async mirror group."""
    argument_spec = eseries_host_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True, type='str'),
        new_name=dict(required=False, type='str'),
        secondaryArrayId=dict(required=True, type='str'),
        syncIntervalMinutes=dict(required=False, default=10, type='int'),
        manualSync=dict(required=False, default=False, type='bool'),
        recoveryWarnThresholdMinutes=dict(required=False, default=20, type='int'),
        repoUtilizationWarnThreshold=dict(required=False, default=80, type='int'),
        interfaceType=dict(required=False, choices=['fibre', 'iscsi'], type='str'),
        state=dict(required=True, choices=['present', 'absent']),
        syncWarnThresholdMinutes=dict(required=False, default=10, type='int')
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    p = module.params

    ssid = p.pop('ssid')
    api_url = p.pop('api_url')
    user = p.pop('api_username')
    pwd = p.pop('api_password')
    new_name = p.pop('new_name')
    state = p.pop('state')

    # Endpoint paths are appended directly, so the base URL needs a trailing slash.
    if not api_url.endswith('/'):
        api_url += '/'

    name_exists, spec_matches, api_data, async_id = has_match(module, ssid, api_url, pwd, user, p)

    if state == 'present':
        if name_exists and spec_matches:
            module.exit_json(changed=False, msg="Desired state met", **api_data)
        elif name_exists and not spec_matches:
            results = update_async(module, ssid, api_url, pwd, user,
                                   p, new_name, async_id)
            module.exit_json(changed=True,
                             msg="Async mirror group updated", async_id=async_id,
                             **results)
        elif not name_exists:
            # Bug fix: create_async's signature is (module, ssid, api_url,
            # api_pwd, api_usr, body) — the original passed (user, pwd),
            # i.e. the credentials swapped, which broke authentication.
            results = create_async(module, ssid, api_url, pwd, user, p)
            module.exit_json(changed=True, **results)

    elif state == 'absent':
        if name_exists:
            remove_amg(module, ssid, api_url, pwd, user, async_id)
            module.exit_json(changed=True, msg="Async mirror group removed.",
                             async_id=async_id)
        else:
            module.exit_json(changed=False,
                             msg="Async Mirror group: %s already absent" % p['name'])


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_role.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_role.py
new file mode 100644
index 00000000..a67506f3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_role.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = """
+---
+module: netapp_e_amg_role
+short_description: NetApp E-Series update the role of a storage array within an Asynchronous Mirror Group (AMG).
+description:
+ - Update a storage array to become the primary or secondary instance in an asynchronous mirror group
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ type: str
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ type: str
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ type: str
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ ssid:
+ description:
+ - The ID of the primary storage array for the async mirror action
+ required: yes
+ type: str
+ name:
+ description:
+ - Name of the role
+ required: yes
+ type: str
+ role:
+ description:
+ - Whether the array should be the primary or secondary array for the AMG
+ required: yes
+ type: str
+ choices: ['primary', 'secondary']
+ noSync:
+ description:
+ - Whether to avoid synchronization prior to role reversal
+ required: no
+ default: no
+ type: bool
+ force:
+ description:
+ - Whether to force the role reversal regardless of the online-state of the primary
+ required: no
+ default: no
+ type: bool
+"""
+
+EXAMPLES = """
+ - name: Update the role of a storage array
+ netapp_e_amg_role:
+ name: updating amg role
+ role: primary
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+"""
+
+RETURN = """
+msg:
+ description: Failure message
+ returned: failure
+ type: str
+ sample: "No Async Mirror Group with the name."
+"""
+import json
+import traceback
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+
+
# JSON request/response headers used by every call to the web services proxy.
HEADERS = {"Content-Type": "application/json", "Accept": "application/json"}
+
+
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP(S) request and return ``(status_code, parsed_json_or_None)``.

    HTTP error responses are read like normal responses; unless
    *ignore_errors* is set, a status >= 400 raises ``Exception(status, body)``
    and an unparseable body raises ``Exception(raw_body)``.
    """
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError as e:
        # The error body still carries a JSON payload describing the failure.
        r = e.fp

    # Bug fix: the original reused the 'data' parameter for the parsed
    # response, so an empty body returned the caller's request payload.
    # A distinct variable keeps an empty response as None. raw_data is
    # pre-initialized so the except clause cannot hit an unbound name.
    response_data = None
    raw_data = None
    try:
        raw_data = r.read()
        if raw_data:
            response_data = json.loads(raw_data)
    except Exception:
        if not ignore_errors:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, response_data)
    else:
        return resp_code, response_data
+
+
def has_match(module, ssid, api_url, api_pwd, api_usr, body, name):
    """Search the array's async mirror groups for one labeled *name*.

    Returns ``(amg_exists, has_desired_role, amg_id, amg_data)``; the role
    check compares the group's ``localRole`` against ``body['role']``.
    """
    url = api_url + 'storage-systems/%s/async-mirrors' % ssid
    try:
        dummy, groups = request(url, url_username=api_usr, url_password=api_pwd,
                                headers=HEADERS)
    except Exception:
        module.fail_json(msg="Failed to find AMGs on storage array. Id [%s]" % (ssid))

    amg_exists = False
    has_desired_role = False
    amg_id = None
    amg_data = None
    desired_role = body.get('role')
    for group in groups:
        if group['label'] != name:
            continue
        amg_exists = True
        amg_id = group['id']
        amg_data = group
        if group['localRole'] == desired_role:
            has_desired_role = True

    return amg_exists, has_desired_role, amg_id, amg_data
+
+
def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id):
    """Post a role-change request for the AMG and block until it completes.

    Returns the AMG's status document once ``roleChangeProgress`` reports
    ``'none'`` (or immediately if the field is absent).
    """
    import time  # local import: only needed for the completion poll below

    endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id)
    url = api_url + endpoint
    post_data = json.dumps(body)
    try:
        request(url, data=post_data, method='POST', url_username=api_usr,
                url_password=api_pwd, headers=HEADERS)
    except Exception as e:
        module.fail_json(
            msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
            exception=traceback.format_exc())

    status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id)
    status_url = api_url + status_endpoint
    try:
        rc, status = request(status_url, method='GET', url_username=api_usr,
                             url_password=api_pwd, headers=HEADERS)
    except Exception as e:
        module.fail_json(
            msg="Failed to check status of AMG after role reversal. "
                "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
            exception=traceback.format_exc())

    # Here we wait for the role reversal to complete. Bug fix: sleep between
    # polls — the original looped with no delay, busy-waiting and hammering
    # the API until the reversal finished.
    if 'roleChangeProgress' in status:
        while status['roleChangeProgress'] != "none":
            time.sleep(1)
            try:
                rc, status = request(status_url, method='GET',
                                     url_username=api_usr, url_password=api_pwd, headers=HEADERS)
            except Exception as e:
                module.fail_json(
                    msg="Failed to check status of AMG after role reversal. "
                        "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)),
                    exception=traceback.format_exc())
    return status
+
+
def main():
    """Module entry point: reverse (or confirm) the local role of an AMG."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True, type='str'),
        role=dict(required=True, choices=['primary', 'secondary']),
        noSync=dict(required=False, type='bool', default=False),
        force=dict(required=False, type='bool', default=False),
        ssid=dict(required=True, type='str'),
        api_url=dict(required=True),
        api_username=dict(required=False),
        api_password=dict(required=False, no_log=True),
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    params = module.params

    ssid = params.pop('ssid')
    api_url = params.pop('api_url')
    user = params.pop('api_username')
    pwd = params.pop('api_password')
    name = params.pop('name')

    # Endpoint paths are appended directly, so normalize the trailing slash.
    if not api_url.endswith('/'):
        api_url = api_url + '/'

    amg_exists, has_desired_role, async_id, amg_data = has_match(module, ssid, api_url, pwd, user, params, name)

    if not amg_exists:
        module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name)
    elif has_desired_role:
        # Already in the requested role; nothing to change.
        module.exit_json(changed=False, **amg_data)
    else:
        amg_data = update_amg(module, ssid, api_url, user, pwd, params, async_id)
        if amg_data:
            module.exit_json(changed=True, **amg_data)
        else:
            module.exit_json(changed=True, msg="AMG role changed.")


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_sync.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_sync.py
new file mode 100644
index 00000000..056accd6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_sync.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = """
+---
+module: netapp_e_amg_sync
+short_description: NetApp E-Series conduct synchronization actions on asynchronous mirror groups.
+description:
+ - Allows for the initialization, suspension and resumption of an asynchronous mirror group's synchronization for NetApp E-series storage arrays.
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ api_username:
+ required: true
+ type: str
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ type: str
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ type: str
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ ssid:
+ description:
+ - The ID of the storage array containing the AMG you wish to target
+ type: str
+ name:
+ description:
+ - The name of the async mirror group you wish to target
+ type: str
+ required: yes
+ state:
+ description:
+ - The synchronization action you'd like to take.
+ - If C(running) then it will begin syncing if there is no active sync or will resume a suspended sync. If there is already a sync in
+ progress, it will return with an OK status.
+ - If C(suspended) it will suspend any ongoing sync action, but return OK if there is no active sync or if the sync is already suspended
+ type: str
+ choices:
+ - running
+ - suspended
+ required: yes
+ delete_recovery_point:
+ description:
+ - Indicates whether the failures point can be deleted on the secondary if necessary to achieve the synchronization.
+ - If true, and if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary for any member volume, the last
+ failures point will be deleted and synchronization will continue.
+ - If false, the synchronization will be suspended if the amount of unsynchronized data exceeds the CoW Repository capacity on the secondary
+ and the failures point will be preserved.
+ - "NOTE: This only has impact for newly launched syncs."
+ type: bool
+ default: no
+"""
+EXAMPLES = """
+ - name: start AMG async
+ netapp_e_amg_sync:
+ name: "{{ amg_sync_name }}"
+ state: running
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+"""
+RETURN = """
+json:
+ description: The object attributes of the AMG.
+ returned: success
+ type: str
+ example:
+ {
+ "changed": false,
+ "connectionType": "fc",
+ "groupRef": "3700000060080E5000299C24000006EF57ACAC70",
+ "groupState": "optimal",
+ "id": "3700000060080E5000299C24000006EF57ACAC70",
+ "label": "made_with_ansible",
+ "localRole": "primary",
+ "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC",
+ "orphanGroup": false,
+ "recoveryPointAgeAlertThresholdMinutes": 20,
+ "remoteRole": "secondary",
+ "remoteTarget": {
+ "nodeName": {
+ "ioInterfaceType": "fc",
+ "iscsiNodeName": null,
+ "remoteNodeWWN": "20040080E5299F1C"
+ },
+ "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC",
+ "scsiinitiatorTargetBaseProperties": {
+ "ioInterfaceType": "fc",
+ "iscsiinitiatorTargetBaseParameters": null
+ }
+ },
+ "remoteTargetId": "ansible2",
+ "remoteTargetName": "Ansible2",
+ "remoteTargetWwn": "60080E5000299F880000000056A25D56",
+ "repositoryUtilizationWarnThreshold": 80,
+ "roleChangeProgress": "none",
+ "syncActivity": "idle",
+ "syncCompletionTimeAlertThresholdMinutes": 10,
+ "syncIntervalMinutes": 10,
+ "worldWideName": "60080E5000299C24000006EF57ACAC70"
+ }
+"""
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils.urls import open_url
+
+
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP(S) request and return ``(status_code, parsed_json_or_None)``.

    HTTP error responses are read like normal responses; unless
    *ignore_errors* is set, a status >= 400 raises ``Exception(status, body)``
    and an unparseable body raises ``Exception(raw_body)``.
    """
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError as e:
        # The error body still carries a JSON payload describing the failure.
        r = e.fp

    # Bug fix: the original reused the 'data' parameter for the parsed
    # response, so an empty body returned the caller's request payload.
    # A distinct variable keeps an empty response as None. raw_data is
    # pre-initialized so the except clause cannot hit an unbound name.
    response_data = None
    raw_data = None
    try:
        raw_data = r.read()
        if raw_data:
            response_data = json.loads(raw_data)
    except Exception:
        if not ignore_errors:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, response_data)
    else:
        return resp_code, response_data
+
+
class AMGsync(object):
    """Manage the synchronization activity of an E-Series async mirror group."""

    def __init__(self):
        argument_spec = basic_auth_argument_spec()
        argument_spec.update(dict(
            api_username=dict(type='str', required=True),
            api_password=dict(type='str', required=True, no_log=True),
            api_url=dict(type='str', required=True),
            name=dict(required=True, type='str'),
            ssid=dict(required=True, type='str'),
            state=dict(required=True, type='str', choices=['running', 'suspended']),
            delete_recovery_point=dict(required=False, type='bool', default=False)
        ))
        self.module = AnsibleModule(argument_spec=argument_spec)
        args = self.module.params
        self.name = args['name']
        self.ssid = args['ssid']
        self.state = args['state']
        self.delete_recovery_point = args['delete_recovery_point']
        try:
            self.user = args['api_username']
            self.pwd = args['api_password']
            self.url = args['api_url']
        except KeyError:
            # Bug fix: the original message was missing a space between
            # "api_username" and "and".
            self.module.fail_json(msg="You must pass in api_username "
                                      "and api_password and api_url to the module.")
        self.certs = args['validate_certs']

        self.post_headers = {
            "Accept": "application/json",
            "Content-Type": "application/json"
        }
        self.amg_id, self.amg_obj = self.get_amg()

    def get_amg(self):
        """Return ``(id, object)`` for the AMG whose label matches self.name."""
        endpoint = self.url + '/storage-systems/%s/async-mirrors' % self.ssid
        (rc, amg_objs) = request(endpoint, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
                                 headers=self.post_headers)
        # Bug fix: on Python 3, filter() returns an iterator which is not
        # subscriptable, so the original filter(...)[0] raised TypeError
        # (never IndexError). Materialize the matches before indexing.
        matches = [amg for amg in amg_objs if amg['label'] == self.name]
        try:
            amg_obj = matches[0]
            amg_id = amg_obj['id']
        except IndexError:
            self.module.fail_json(
                msg="There is no async mirror group %s associated with storage array %s" % (self.name, self.ssid))
        return amg_id, amg_obj

    @property
    def current_state(self):
        # Re-fetch so the state reflects the array, not a cached object.
        amg_id, amg_obj = self.get_amg()
        return amg_obj['syncActivity']

    def run_sync_action(self):
        """Issue the sync/resume/suspend action implied by the requested state.

        Only called when current and desired states differ and the current
        state is not an error state, so no revalidation is needed here.
        """
        post_body = dict()
        if self.state == 'running':
            if self.current_state == 'idle':
                if self.delete_recovery_point:
                    post_body.update(dict(deleteRecoveryPointIfNecessary=self.delete_recovery_point))
                suffix = 'sync'
            else:
                # In a suspended state; resume rather than starting a new sync.
                suffix = 'resume'
        else:
            suffix = 'suspend'

        endpoint = self.url + "/storage-systems/%s/async-mirrors/%s/%s" % (self.ssid, self.amg_id, suffix)

        (rc, resp) = request(endpoint, method='POST', url_username=self.user, url_password=self.pwd,
                             validate_certs=self.certs, data=json.dumps(post_body), headers=self.post_headers,
                             ignore_errors=True)

        if not str(rc).startswith('2'):
            self.module.fail_json(msg=str(resp['errorMessage']))

        return resp

    def apply(self):
        """Reconcile the AMG's sync activity with the requested state and exit."""
        state_map = dict(
            running=['active'],
            suspended=['userSuspended', 'internallySuspended', 'paused'],
            # NOTE(review): 'unkown' presumably mirrors the API's literal
            # (misspelled) value — left unchanged; verify against the API.
            err=['unkown', '_UNDEFINED'])

        changed = False
        if self.current_state not in state_map[self.state]:
            if self.current_state in state_map['err']:
                # Bug fix: the original applied '%' only to the second string
                # literal (which has no placeholder), raising a TypeError
                # instead of producing this message.
                self.module.fail_json(
                    msg="The sync is a state of '%s', this requires manual intervention. "
                        "Please investigate and try again" % self.current_state)
            else:
                self.amg_obj = self.run_sync_action()
                # Bug fix: the original always reported changed=False even
                # after performing a sync action.
                changed = True

        (ret, amg) = self.get_amg()
        self.module.exit_json(changed=changed, **amg)
+
+
def main():
    """Instantiate the AMGsync helper and apply the requested sync state."""
    AMGsync().apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_asup.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_asup.py
new file mode 100644
index 00000000..f039626a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_asup.py
@@ -0,0 +1,314 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_asup
+short_description: NetApp E-Series manage auto-support settings
+description:
+ - Allow the auto-support settings to be configured for an individual E-Series storage-system
+version_added: '2.7'
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ description:
+ - Enable/disable the E-Series auto-support configuration.
+ - When this option is enabled, configuration, logs, and other support-related information will be relayed
+ to NetApp to help better support your system. No personally identifiable information, passwords, etc, will
+ be collected.
+ default: enabled
+ type: str
+ choices:
+ - enabled
+ - disabled
+ aliases:
+ - asup
+ - auto_support
+ - autosupport
+ active:
+ description:
+ - Enable active/proactive monitoring for ASUP. When a problem is detected by our monitoring systems, it's
+ possible that the bundle did not contain all of the required information at the time of the event.
+ Enabling this option allows NetApp support personnel to manually request transmission or re-transmission
+        of support data in order to resolve the problem.
+ - Only applicable if I(state=enabled).
+ default: yes
+ type: bool
+ start:
+ description:
+ - A start hour may be specified in a range from 0 to 23 hours.
+ - ASUP bundles will be sent daily between the provided start and end time (UTC).
+ - I(start) must be less than I(end).
+ aliases:
+ - start_time
+ default: 0
+ type: int
+ end:
+ description:
+ - An end hour may be specified in a range from 1 to 24 hours.
+ - ASUP bundles will be sent daily between the provided start and end time (UTC).
+ - I(start) must be less than I(end).
+ aliases:
+ - end_time
+ default: 24
+ type: int
+ days:
+ description:
+ - A list of days of the week that ASUP bundles will be sent. A larger, weekly bundle will be sent on one
+ of the provided days.
+ choices:
+ - monday
+ - tuesday
+ - wednesday
+ - thursday
+ - friday
+ - saturday
+ - sunday
+ required: no
+ type: list
+ aliases:
+ - days_of_week
+ - schedule_days
+ verbose:
+ description:
+ - Provide the full ASUP configuration in the return.
+ default: no
+ required: no
+ type: bool
+ log_path:
+ description:
+ - A local path to a file to be used for debug logging
+ type: str
+ required: no
+notes:
+ - Check mode is supported.
+ - Enabling ASUP will allow our support teams to monitor the logs of the storage-system in order to proactively
+ respond to issues with the system. It is recommended that all ASUP-related options be enabled, but they may be
+ disabled if desired.
+ - This API is currently only supported with the Embedded Web Services API v2.0 and higher.
+"""
+
+EXAMPLES = """
+ - name: Enable ASUP and allow pro-active retrieval of bundles
+ netapp_e_asup:
+ state: enabled
+ active: yes
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+
+ - name: Set the ASUP schedule to only send bundles from 12 AM CST to 3 AM CST.
+ netapp_e_asup:
+ start: 17
+ end: 20
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+asup:
+ description:
+ - True if ASUP is enabled.
+ returned: on success
+ sample: True
+ type: bool
+active:
+ description:
+ - True if the active option has been enabled.
+ returned: on success
+ sample: True
+ type: bool
+cfg:
+ description:
+ - Provide the full ASUP configuration.
+ returned: on success when I(verbose=true).
+ type: complex
+ contains:
+ asupEnabled:
+ description:
+ - True if ASUP has been enabled.
+ type: bool
+ onDemandEnabled:
+ description:
+ - True if ASUP active monitoring has been enabled.
+ type: bool
+ daysOfWeek:
+ description:
+ - The days of the week that ASUP bundles will be sent.
+ type: list
+"""
+
+import json
+import logging
+from pprint import pformat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
# JSON request/response headers used for every device-asup API call.
HEADERS = {"Content-Type": "application/json", "Accept": "application/json"}
+
+
class Asup(object):
    """Manage the E-Series auto-support (ASUP) configuration for one array."""

    # Canonical day names accepted by the schedule; also the default schedule
    # when the user supplies none.
    DAYS_OPTIONS = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']

    def __init__(self):
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(dict(
            state=dict(type='str', required=False, default='enabled', aliases=['asup', 'auto_support', 'autosupport'],
                       choices=['enabled', 'disabled']),
            active=dict(type='bool', required=False, default=True, ),
            days=dict(type='list', required=False, aliases=['schedule_days', 'days_of_week'],
                      choices=self.DAYS_OPTIONS),
            start=dict(type='int', required=False, default=0, aliases=['start_time']),
            end=dict(type='int', required=False, default=24, aliases=['end_time']),
            verbose=dict(type='bool', required=False, default=False),
            log_path=dict(type='str', required=False),
        ))

        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
        args = self.module.params
        self.asup = args['state'] == 'enabled'
        self.active = args['active']
        self.days = args['days']
        self.start = args['start']
        self.end = args['end']
        self.verbose = args['verbose']

        self.ssid = args['ssid']
        self.url = args['api_url']
        self.creds = dict(url_password=args['api_password'],
                          validate_certs=args['validate_certs'],
                          url_username=args['api_username'], )

        self.check_mode = self.module.check_mode

        log_path = args['log_path']

        # logging setup
        self._logger = logging.getLogger(self.__class__.__name__)

        if log_path:
            logging.basicConfig(
                level=logging.DEBUG, filename=log_path, filemode='w',
                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')

        if not self.url.endswith('/'):
            self.url += '/'

        # Validate the schedule window, then convert the hours to
        # minutes-of-day as expected by the device-asup API (the end of day
        # is capped at 1439, i.e. 23:59).
        if self.start >= self.end:
            self.module.fail_json(msg="The value provided for the start time is invalid."
                                      " It must be less than the end time.")
        if self.start < 0 or self.start > 23:
            self.module.fail_json(msg="The value provided for the start time is invalid. It must be between 0 and 23.")
        else:
            self.start = self.start * 60
        if self.end < 1 or self.end > 24:
            self.module.fail_json(msg="The value provided for the end time is invalid. It must be between 1 and 24.")
        else:
            self.end = min(self.end * 60, 1439)

        if not self.days:
            self.days = self.DAYS_OPTIONS

    def get_configuration(self):
        """Fetch the current ASUP configuration; fail if the array lacks ASUP support."""
        try:
            (rc, result) = request(self.url + 'device-asup', headers=HEADERS, **self.creds)

            if not (result['asupCapable'] and result['onDemandCapable']):
                self.module.fail_json(msg="ASUP is not supported on this device. Array Id [%s]." % (self.ssid))
            return result

        except Exception as err:
            self.module.fail_json(msg="Failed to retrieve ASUP configuration! Array Id [%s]. Error [%s]."
                                      % (self.ssid, to_native(err)))

    def update_configuration(self):
        """Push the desired ASUP settings when they differ from the current ones.

        Returns True when an update was needed (in check mode nothing is sent,
        but the would-be change is still reported).
        """
        config = self.get_configuration()
        update = False
        body = dict()

        if self.asup:
            body = dict(asupEnabled=True)
            if not config['asupEnabled']:
                update = True

            if (config['onDemandEnabled'] and config['remoteDiagsEnabled']) != self.active:
                update = True
                body.update(dict(onDemandEnabled=self.active,
                                 remoteDiagsEnabled=self.active))
            # Sort both sides so the day-list comparison is order-insensitive.
            self.days.sort()
            config['schedule']['daysOfWeek'].sort()

            body['schedule'] = dict(daysOfWeek=self.days,
                                    dailyMinTime=self.start,
                                    dailyMaxTime=self.end,
                                    weeklyMinTime=self.start,
                                    weeklyMaxTime=self.end)

            if self.days != config['schedule']['daysOfWeek']:
                update = True
            if self.start != config['schedule']['dailyMinTime'] or self.start != config['schedule']['weeklyMinTime']:
                update = True
            elif self.end != config['schedule']['dailyMaxTime'] or self.end != config['schedule']['weeklyMaxTime']:
                update = True

        elif config['asupEnabled']:
            body = dict(asupEnabled=False)
            update = True

        self._logger.info(pformat(body))

        if update and not self.check_mode:
            try:
                (rc, result) = request(self.url + 'device-asup', method='POST',
                                       data=json.dumps(body), headers=HEADERS, **self.creds)
            # This is going to catch cases like a connection failure
            except Exception as err:
                # Bug fix: the original message said "failed to set the
                # storage-system name" — a copy/paste from another module.
                self.module.fail_json(msg="Failed to update ASUP settings! Array Id [%s]. Error [%s]."
                                          % (self.ssid, to_native(err)))

        return update

    def update(self):
        """Apply the configuration and exit the module with the resulting state."""
        update = self.update_configuration()
        cfg = self.get_configuration()
        if self.verbose:
            self.module.exit_json(msg="The ASUP settings have been updated.", changed=update,
                                  asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'], cfg=cfg)
        else:
            self.module.exit_json(msg="The ASUP settings have been updated.", changed=update,
                                  asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'])

    def __call__(self, *args, **kwargs):
        self.update()
+
+
def main():
    """Build the Asup helper and invoke it to reconcile the ASUP settings."""
    Asup()()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auditlog.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auditlog.py
new file mode 100644
index 00000000..814a72d3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auditlog.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_auditlog
+short_description: NetApp E-Series manage audit-log configuration
+description:
+ - This module allows an e-series storage system owner to set audit-log configuration parameters.
+version_added: '2.7'
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ max_records:
+ description:
+ - The maximum number log messages audit-log will retain.
+ - Max records must be between and including 100 and 50000.
+ default: 50000
+ type: int
+ log_level:
+ description: Filters the log messages according to the specified log level selection.
+ choices:
+ - all
+ - writeOnly
+ default: writeOnly
+ type: str
+ full_policy:
+ description: Specifies what audit-log should do once the number of entries approach the record limit.
+ choices:
+ - overWrite
+ - preventSystemAccess
+ default: overWrite
+ type: str
+ threshold:
+ description:
+ - This is the memory full percent threshold that audit-log will start issuing warning messages.
+ - Percent range must be between and including 60 and 90.
+ default: 90
+ type: int
+ force:
+ description:
+ - Forces the audit-log configuration to delete log history when log messages fullness cause immediate
+ warning or full condition.
+ - Warning! This will cause any existing audit-log messages to be deleted.
+ - This is only applicable for I(full_policy=preventSystemAccess).
+ type: bool
+ default: no
+ log_path:
+ description: A local path to a file to be used for debug logging.
+ required: no
+ type: str
+notes:
+ - Check mode is supported.
+ - This module is currently only supported with the Embedded Web Services API v3.0 and higher.
+"""
+
+EXAMPLES = """
+- name: Define audit-log to prevent system access if records exceed 50000 with warnings occurring at 60% capacity.
+ netapp_e_auditlog:
+ api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
+ api_username: "{{ netapp_e_api_username }}"
+ api_password: "{{ netapp_e_api_password }}"
+ ssid: "{{ netapp_e_ssid }}"
+ validate_certs: no
+ max_records: 50000
+ log_level: all
+ full_policy: preventSystemAccess
+ threshold: 60
+ log_path: /path/to/log_file.log
+- name: Define audit-log utilize the default values.
+ netapp_e_auditlog:
+ api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
+ api_username: "{{ netapp_e_api_username }}"
+ api_password: "{{ netapp_e_api_password }}"
+ ssid: "{{ netapp_e_ssid }}"
+- name: Force audit-log configuration when full or warning conditions occur while enacting preventSystemAccess policy.
+ netapp_e_auditlog:
+ api_url: "https://{{ netapp_e_api_host }}/devmgr/v2"
+ api_username: "{{ netapp_e_api_username }}"
+ api_password: "{{ netapp_e_api_password }}"
+ ssid: "{{ netapp_e_ssid }}"
+ max_records: 5000
+ log_level: all
+ full_policy: preventSystemAccess
+ threshold: 60
+ force: yes
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+"""
+
+import json
+import logging
+from pprint import pformat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+try:
+ from urlparse import urlparse, urlunparse
+except Exception:
+ from urllib.parse import urlparse, urlunparse
+
+
class AuditLog(object):
    """Manage the E-Series audit-log configuration.

    Reads the current audit-log settings, compares them with the requested
    module parameters and, when they differ, posts the new configuration.
    When the array rejects the update (HTTP 422) and I(force=true), existing
    log messages are deleted and the update is retried once.
    """
    # Absolute maximum number of records the audit-log may retain.
    MAX_RECORDS = 50000
    HEADERS = {"Content-Type": "application/json",
               "Accept": "application/json"}

    def __init__(self):
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(dict(
            max_records=dict(type="int", default=50000),
            log_level=dict(type="str", default="writeOnly", choices=["all", "writeOnly"]),
            full_policy=dict(type="str", default="overWrite", choices=["overWrite", "preventSystemAccess"]),
            threshold=dict(type="int", default=90),
            force=dict(type="bool", default=False),
            log_path=dict(type='str', required=False)))

        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
        args = self.module.params

        # Validate numeric ranges up front so we fail before any API traffic.
        self.max_records = args["max_records"]
        if self.max_records < 100 or self.max_records > self.MAX_RECORDS:
            self.module.fail_json(msg="Audit-log max_records count must be between 100 and 50000: [%s]"
                                      % self.max_records)
        self.threshold = args["threshold"]
        if self.threshold < 60 or self.threshold > 90:
            self.module.fail_json(msg="Audit-log percent threshold must be between 60 and 90: [%s]" % self.threshold)
        self.log_level = args["log_level"]
        self.full_policy = args["full_policy"]
        self.force = args["force"]
        self.ssid = args['ssid']
        self.url = args['api_url']
        if not self.url.endswith('/'):
            self.url += '/'
        self.creds = dict(url_password=args['api_password'],
                          validate_certs=args['validate_certs'],
                          url_username=args['api_username'], )

        # logging setup
        log_path = args['log_path']
        self._logger = logging.getLogger(self.__class__.__name__)

        if log_path:
            logging.basicConfig(
                level=logging.DEBUG, filename=log_path, filemode='w',
                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')

        # Proxy and embedded deployments expose audit-log under different paths.
        self.proxy_used = self.is_proxy()
        self._logger.info(self.proxy_used)
        self.check_mode = self.module.check_mode

    def is_proxy(self):
        """Determine whether the API is embedded or proxy.

        :returns: True when the web services instance reports runningAsProxy.
        """
        try:
            # replace http url path with devmgr/utils/about
            about_url = list(urlparse(self.url))
            about_url[2] = "devmgr/utils/about"
            about_url = urlunparse(about_url)

            rc, data = request(about_url, timeout=300, headers=self.HEADERS, **self.creds)

            return data["runningAsProxy"]
        except Exception as err:
            self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]."
                                      % (self.ssid, to_native(err)))

    def get_configuration(self):
        """Retrieve the existing audit-log configurations.

        :returns: dictionary containing current audit-log configuration
        """
        try:
            if self.proxy_used:
                rc, data = request(self.url + "audit-log/config", timeout=300, headers=self.HEADERS, **self.creds)
            else:
                rc, data = request(self.url + "storage-systems/%s/audit-log/config" % self.ssid,
                                   timeout=300, headers=self.HEADERS, **self.creds)
            return data
        except Exception as err:
            self.module.fail_json(msg="Failed to retrieve the audit-log configuration! "
                                      "Array Id [%s]. Error [%s]."
                                      % (self.ssid, to_native(err)))

    def build_configuration(self):
        """Build audit-log expected configuration.

        :returns: Tuple containing update boolean value and dictionary of audit-log configuration
        """
        config = self.get_configuration()

        current = dict(auditLogMaxRecords=config["auditLogMaxRecords"],
                       auditLogLevel=config["auditLogLevel"],
                       auditLogFullPolicy=config["auditLogFullPolicy"],
                       auditLogWarningThresholdPct=config["auditLogWarningThresholdPct"])

        body = dict(auditLogMaxRecords=self.max_records,
                    auditLogLevel=self.log_level,
                    auditLogFullPolicy=self.full_policy,
                    auditLogWarningThresholdPct=self.threshold)

        # An update is required only when the requested settings differ.
        update = current != body

        self._logger.info(pformat(update))
        self._logger.info(pformat(body))
        return update, body

    def delete_log_messages(self):
        """Delete all audit-log messages."""
        self._logger.info("Deleting audit-log messages...")
        try:
            if self.proxy_used:
                rc, result = request(self.url + "audit-log?clearAll=True", timeout=300,
                                     method="DELETE", headers=self.HEADERS, **self.creds)
            else:
                rc, result = request(self.url + "storage-systems/%s/audit-log?clearAll=True" % self.ssid, timeout=300,
                                     method="DELETE", headers=self.HEADERS, **self.creds)
        except Exception as err:
            self.module.fail_json(msg="Failed to delete audit-log messages! Array Id [%s]. Error [%s]."
                                      % (self.ssid, to_native(err)))

    def update_configuration(self, update=None, body=None, attempt_recovery=True):
        """Update audit-log configuration.

        :param bool update: whether an update is needed; computed when None.
        :param dict body: request body to post; computed when None.
        :param bool attempt_recovery: allow one forced retry after clearing log messages.
        :returns: True when a configuration change was (or would be) made.
        """
        if update is None or body is None:
            update, body = self.build_configuration()

        if update and not self.check_mode:
            try:
                if self.proxy_used:
                    # Fix: post to the same proxy endpoint that get_configuration reads
                    # ("audit-log/config"); the original posted to
                    # "storage-systems/audit-log/config", which does not match the GET path.
                    rc, result = request(self.url + "audit-log/config", timeout=300,
                                         data=json.dumps(body), method='POST', headers=self.HEADERS,
                                         ignore_errors=True, **self.creds)
                else:
                    rc, result = request(self.url + "storage-systems/%s/audit-log/config" % self.ssid, timeout=300,
                                         data=json.dumps(body), method='POST', headers=self.HEADERS,
                                         ignore_errors=True, **self.creds)

                if rc == 422:
                    if self.force and attempt_recovery:
                        # The array refused the change; clear messages and retry exactly once.
                        self.delete_log_messages()
                        update = self.update_configuration(update, body, False)
                    else:
                        # Fix: original called to_native(rc, result), passing the response as
                        # to_native's *encoding* argument; report both values directly instead.
                        self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Code [%s]. Error [%s]."
                                                  % (self.ssid, rc, result))

            except Exception as error:
                self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Error [%s]."
                                          % (self.ssid, to_native(error)))
        return update

    def update(self):
        """Update the audit-log configuration and exit the module."""
        update = self.update_configuration()
        self.module.exit_json(msg="Audit-log update complete", changed=update)

    def __call__(self):
        self.update()
+
+
def main():
    """Entry point: construct the AuditLog module object and run it."""
    AuditLog()()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auth.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auth.py
new file mode 100644
index 00000000..ac5c14c0
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auth.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_auth
+short_description: NetApp E-Series set or update the password for a storage array.
+description:
+ - Sets or updates the password for a storage array. When the password is updated on the storage array, it must be updated on the SANtricity Web
+ Services proxy. Note, all storage arrays do not have a Monitor or RO role.
+version_added: "2.2"
+author: Kevin Hulquest (@hulquest)
+options:
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ name:
+ description:
+ - The name of the storage array. Note that if more than one storage array with this name is detected, the task will fail and you'll have to use
+ the ID instead.
+ required: False
+ type: str
+ ssid:
+ description:
+ - the identifier of the storage array in the Web Services Proxy.
+ required: False
+ type: str
+ set_admin:
+ description:
+ - Boolean value on whether to update the admin password. If set to false then the RO account is updated.
+ type: bool
+ default: False
+ current_password:
+ description:
+ - The current admin password. This is not required if the password hasn't been set before.
+ required: False
+ type: str
+ new_password:
+ description:
+ - The password you would like to set. Cannot be more than 30 characters.
+ required: True
+ type: str
+ api_url:
+ description:
+ - The full API url.
+ - "Example: http://ENDPOINT:8080/devmgr/v2"
+ - This can optionally be set via an environment variable, API_URL
+ required: False
+ type: str
+ api_username:
+ description:
+ - The username used to authenticate against the API
+ - This can optionally be set via an environment variable, API_USERNAME
+ required: False
+ type: str
+ api_password:
+ description:
+ - The password used to authenticate against the API
+ - This can optionally be set via an environment variable, API_PASSWORD
+ required: False
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Test module
+ netapp_e_auth:
+ name: trex
+ current_password: OldPasswd
+ new_password: NewPasswd
+ set_admin: yes
+ api_url: '{{ netapp_api_url }}'
+ api_username: '{{ netapp_api_username }}'
+ api_password: '{{ netapp_api_password }}'
+'''
+
+RETURN = '''
+msg:
+ description: Success message
+ returned: success
+ type: str
+ sample: "Password Updated Successfully"
+'''
+import json
+import traceback
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+ "x-netapp-password-validate-method": "none"
+
+}
+
+
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP request via Ansible's open_url and return (status code, parsed JSON body).

    HTTP error responses are not raised immediately; their body is parsed and,
    unless ignore_errors is set, a generic Exception carrying (code, body) is raised
    for any status >= 400.
    """
    try:
        response = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                            force=force, last_mod_time=last_mod_time, timeout=timeout,
                            validate_certs=validate_certs, url_username=url_username,
                            url_password=url_password, http_agent=http_agent,
                            force_basic_auth=force_basic_auth)
    except HTTPError as http_err:
        # An HTTP error object still carries a readable response body.
        response = http_err.fp

    try:
        raw = response.read()
        if raw:
            data = json.loads(raw)
        else:
            # NOTE(review): when the body is empty, `data` keeps the *request* payload
            # passed in as an argument and is returned as-is -- confirm callers rely on this.
            raw = None
    except Exception:
        if not ignore_errors:
            raise Exception(raw)

    status = response.getcode()
    if status >= 400 and not ignore_errors:
        raise Exception(status, data)
    return status, data
+
+
def get_ssid(module, name, api_url, user, pwd):
    """Resolve a storage-system name to its id; fail on zero or multiple matches."""
    matches = 0
    ssid = None
    rc, systems = request(api_url + 'storage-systems', headers=HEADERS, url_username=user,
                          url_password=pwd, validate_certs=module.validate_certs)
    for system in systems:
        if system['name'] != name:
            continue
        matches += 1
        if matches > 1:
            # Names are not unique across arrays; refuse to guess which one was meant.
            module.fail_json(
                msg="You supplied a name for the Storage Array but more than 1 array was found with that name. " +
                    "Use the id instead")
        else:
            ssid = system['id']

    if matches == 0:
        module.fail_json(msg="No storage array with the name %s was found" % name)
    else:
        return ssid
+
+
def get_pwd_status(module, ssid, api_url, user, pwd):
    """Return the (read-only set, admin set) password flags for the given array."""
    url = api_url + "storage-systems/%s/passwords" % ssid
    try:
        rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd,
                           validate_certs=module.validate_certs)
        return data['readOnlyPasswordSet'], data['adminPasswordSet']
    except HTTPError as e:
        module.fail_json(msg="There was an issue with connecting, please check that your "
                             "endpoint is properly defined and your credentials are correct: %s" % to_native(e))
+
+
def update_storage_system_pwd(module, ssid, pwd, api_url, api_usr, api_pwd):
    """Update the stored storage-system password"""
    url = api_url + 'storage-systems/%s' % ssid
    body = json.dumps(dict(storedPassword=pwd))
    try:
        # request() returns (rc, data); pass the tuple straight through to the caller.
        return request(url, data=body, method='POST', headers=HEADERS, url_username=api_usr,
                       url_password=api_pwd, validate_certs=module.validate_certs)
    except Exception as e:
        module.fail_json(msg="Failed to update system password. Id [%s]. Error [%s]" % (ssid, to_native(e)))
+
+
def set_password(module, ssid, api_url, user, pwd, current_password=None, new_password=None, set_admin=False):
    """Set the storage-system password"""
    url = api_url + "storage-systems/%s/passwords" % ssid
    current = current_password or ""

    body = json.dumps(dict(currentAdminPassword=current, adminPassword=set_admin, newPassword=new_password))
    try:
        rc, data = request(url, method='POST', data=body, headers=HEADERS, url_username=user,
                           url_password=pwd, ignore_errors=True, validate_certs=module.validate_certs)
    except Exception as e:
        module.fail_json(msg="Failed to set system password. Id [%s]. Error [%s]" % (ssid, to_native(e)),
                         exception=traceback.format_exc())

    if rc == 422:
        # The array rejected the supplied current password; retry once with an empty one.
        body = json.dumps(dict(currentAdminPassword='', adminPassword=set_admin, newPassword=new_password))
        try:
            rc, data = request(url, method='POST', data=body, headers=HEADERS, url_username=user,
                               url_password=pwd, validate_certs=module.validate_certs)
        except Exception:
            # TODO(lorenp): Resolve ignored rc, data
            module.fail_json(msg="Wrong or no admin password supplied. Please update your playbook and try again")

    if int(rc) >= 300:
        module.fail_json(msg="Failed to set system password. Id [%s] Code [%s]. Error [%s]" % (ssid, rc, data))

    # Keep the proxy's stored credential in sync with the array's new password.
    rc, update_data = update_storage_system_pwd(module, ssid, new_password, api_url, user, pwd)
    if int(rc) < 300:
        return update_data
    module.fail_json(msg="%s:%s" % (rc, update_data))
+
+
def main():
    """Module entry point: parse arguments, resolve the array, and set its password."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        name=dict(required=False, type='str'),
        ssid=dict(required=False, type='str'),
        current_password=dict(required=False, no_log=True),
        new_password=dict(required=True, no_log=True),
        set_admin=dict(required=True, type='bool'),
        api_url=dict(required=True),
        api_username=dict(required=False),
        api_password=dict(required=False, no_log=True)))
    module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['name', 'ssid']],
                           required_one_of=[['name', 'ssid']])

    params = module.params
    name = params['name']
    ssid = params['ssid']
    current_password = params['current_password']
    new_password = params['new_password']
    set_admin = params['set_admin']
    user = params['api_username']
    pwd = params['api_password']
    api_url = params['api_url']
    # The request helpers read validate_certs off the module object itself.
    module.validate_certs = params['validate_certs']

    if not api_url.endswith('/'):
        api_url += '/'

    if name:
        ssid = get_ssid(module, name, api_url, user, pwd)

    ro_pwd, admin_pwd = get_pwd_status(module, ssid, api_url, user, pwd)

    if admin_pwd and not current_password:
        module.fail_json(
            msg="Admin account has a password set. " +
                "You must supply current_password in order to update the RO or Admin passwords")

    if len(new_password) > 30:
        module.fail_json(msg="Passwords must not be greater than 30 characters in length")

    result = set_password(module, ssid, api_url, user, pwd, current_password=current_password,
                          new_password=new_password, set_admin=set_admin)

    module.exit_json(changed=True, msg="Password Updated Successfully",
                     password_set=result['passwordSet'],
                     password_status=result['passwordStatus'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_drive_firmware.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_drive_firmware.py
new file mode 100644
index 00000000..e74bac77
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_drive_firmware.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['deprecated'],
                    'supported_by': 'community'}

DOCUMENTATION = """
---
module: netapp_e_drive_firmware
version_added: "2.9"
short_description: NetApp E-Series manage drive firmware
description:
    - Ensure drive firmware version is activated on specified drive model.
author:
    - Nathan Swartz (@ndswartz)
extends_documentation_fragment:
    - netapp_eseries.santricity.santricity.netapp.eseries
options:
    firmware:
        description:
            - list of drive firmware file paths.
            - NetApp E-Series drives require special firmware which can be downloaded from https://mysupport.netapp.com/NOW/download/tools/diskfw_eseries/
        type: list
        required: True
    wait_for_completion:
        description:
            - This flag will cause module to wait for any upgrade actions to complete.
        type: bool
        default: false
    ignore_inaccessible_drives:
        description:
            - This flag will determine whether drive firmware upgrade should fail if any affected drives are inaccessible.
        type: bool
        default: false
    upgrade_drives_online:
        description:
            - This flag will determine whether drive firmware can be upgraded while drives are accepting I/O.
            - When I(upgrade_drives_online=False) stop all I/O before running task.
        type: bool
        default: true
"""
EXAMPLES = """
- name: Ensure correct firmware versions
  netapp_e_drive_firmware:
    ssid: "1"
    api_url: "https://192.168.1.100:8443/devmgr/v2"
    api_username: "admin"
    api_password: "adminpass"
    validate_certs: true
    firmware: "path/to/drive_firmware"
    wait_for_completion: true
    ignore_inaccessible_drives: false
"""
RETURN = """
msg:
    description: Whether any drive firmware was upgraded and whether it is in progress.
    type: str
    returned: always
    sample:
        { changed: True, upgrade_in_process: True }
"""
+import os
+import re
+
+from time import sleep
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule, create_multipart_formdata
+from ansible.module_utils._text import to_native, to_text, to_bytes
+
+
class NetAppESeriesDriveFirmware(NetAppESeriesModule):
    """Ensure the uploaded drive firmware is activated on every compatible drive."""

    # Upper bound on polling time for an in-progress upgrade (seconds).
    WAIT_TIMEOUT_SEC = 60 * 15

    def __init__(self):
        ansible_options = dict(
            firmware=dict(type="list", required=True),
            wait_for_completion=dict(type="bool", default=False),
            ignore_inaccessible_drives=dict(type="bool", default=False),
            upgrade_drives_online=dict(type="bool", default=True))

        super(NetAppESeriesDriveFirmware, self).__init__(ansible_options=ansible_options,
                                                         web_services_version="02.00.0000.0000",
                                                         supports_check_mode=True)

        args = self.module.params
        self.firmware_list = args["firmware"]
        self.wait_for_completion = args["wait_for_completion"]
        self.ignore_inaccessible_drives = args["ignore_inaccessible_drives"]
        self.upgrade_drives_online = args["upgrade_drives_online"]

        # Memoized result of upgrade_list(): [{"filename": ..., "driveRefList": [...]}, ...]
        self.upgrade_list_cache = None

        # NOTE(review): upgrade_required_cache and drive_info_cache are never read in this
        # module -- kept to avoid interface changes; confirm unused before removing.
        self.upgrade_required_cache = None
        self.upgrade_in_progress = False
        self.drive_info_cache = None

    def upload_firmware(self):
        """Upload each specified drive firmware file to the storage system."""
        for firmware in self.firmware_list:
            firmware_name = os.path.basename(firmware)
            files = [("file", firmware_name, firmware)]
            headers, data = create_multipart_formdata(files)
            try:
                rc, response = self.request("/files/drive", method="POST", headers=headers, data=data)
            except Exception as error:
                self.module.fail_json(msg="Failed to upload drive firmware [%s]. Array [%s]. Error [%s]." % (firmware_name, self.ssid, to_native(error)))

    def upgrade_list(self):
        """Determine whether firmware is compatible with the specified drives.

        :returns: cached list of dictionaries pairing each uploaded firmware filename with
            the drive references that still require it.
        """
        if self.upgrade_list_cache is None:
            self.upgrade_list_cache = []
            try:
                rc, response = self.request("storage-systems/%s/firmware/drives" % self.ssid)

                # Create upgrade list, this ensures only the firmware uploaded is applied
                for firmware in self.firmware_list:
                    filename = os.path.basename(firmware)

                    for uploaded_firmware in response["compatibilities"]:
                        if uploaded_firmware["filename"] == filename:

                            # Determine whether upgrade is required
                            drive_reference_list = []
                            for drive in uploaded_firmware["compatibleDrives"]:
                                try:
                                    rc, drive_info = self.request("storage-systems/%s/drives/%s" % (self.ssid, drive["driveRef"]))

                                    # Add drive references that are supported and differ from current firmware
                                    if (drive_info["firmwareVersion"] != uploaded_firmware["firmwareVersion"] and
                                            uploaded_firmware["firmwareVersion"] in uploaded_firmware["supportedFirmwareVersions"]):

                                        if self.ignore_inaccessible_drives or (not drive_info["offline"] and drive_info["available"]):
                                            drive_reference_list.append(drive["driveRef"])

                                        # Online upgrades require every affected drive to support them.
                                        if not drive["onlineUpgradeCapable"] and self.upgrade_drives_online:
                                            self.module.fail_json(msg="Drive is not capable of online upgrade. Array [%s]. Drive [%s]."
                                                                      % (self.ssid, drive["driveRef"]))

                                except Exception as error:
                                    self.module.fail_json(msg="Failed to retrieve drive information. Array [%s]. Drive [%s]. Error [%s]."
                                                              % (self.ssid, drive["driveRef"], to_native(error)))

                            if drive_reference_list:
                                self.upgrade_list_cache.extend([{"filename": filename, "driveRefList": drive_reference_list}])

            except Exception as error:
                self.module.fail_json(msg="Failed to complete compatibility and health check. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))

        return self.upgrade_list_cache

    def wait_for_upgrade_completion(self):
        """Poll drive firmware state until all targeted drives report okay or the timeout expires."""
        drive_references = [reference for drive in self.upgrade_list() for reference in drive["driveRefList"]]
        last_status = None
        for attempt in range(int(self.WAIT_TIMEOUT_SEC / 5)):
            try:
                rc, response = self.request("storage-systems/%s/firmware/drives/state" % self.ssid)

                # Check drive status
                for status in response["driveStatus"]:
                    last_status = status
                    if status["driveRef"] in drive_references:
                        if status["status"] == "okay":
                            continue
                        elif status["status"] in ["inProgress", "inProgressRecon", "pending", "notAttempted"]:
                            break
                        else:
                            self.module.fail_json(msg="Drive firmware upgrade failed. Array [%s]. Drive [%s]. Status [%s]."
                                                      % (self.ssid, status["driveRef"], status["status"]))
                else:
                    # No targeted drive is still in progress: the upgrade completed.
                    self.upgrade_in_progress = False
                    break
            except Exception as error:
                self.module.fail_json(msg="Failed to retrieve drive status. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))

            sleep(5)
        else:
            self.module.fail_json(msg="Timed out waiting for drive firmware upgrade. Array [%s]. Status [%s]." % (self.ssid, last_status))

    def upgrade(self):
        """Initiate the firmware upgrade on all applicable drives."""
        try:
            rc, response = self.request("storage-systems/%s/firmware/drives/initiate-upgrade?onlineUpdate=%s"
                                        % (self.ssid, "true" if self.upgrade_drives_online else "false"), method="POST", data=self.upgrade_list())
            self.upgrade_in_progress = True
        except Exception as error:
            self.module.fail_json(msg="Failed to upgrade drive firmware. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))

        if self.wait_for_completion:
            self.wait_for_upgrade_completion()

    def apply(self):
        """Ensure the drive firmware policy is enforced on the E-Series storage system."""
        self.upload_firmware()

        # check_mode still reports whether a change would be required.
        if self.upgrade_list() and not self.module.check_mode:
            self.upgrade()

        self.module.exit_json(changed=bool(self.upgrade_list()),
                              upgrade_in_process=self.upgrade_in_progress)
+
+
def main():
    """Entry point: construct the module object and enforce the firmware policy."""
    NetAppESeriesDriveFirmware().apply()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_facts.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_facts.py
new file mode 100644
index 00000000..3734a477
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_facts.py
@@ -0,0 +1,530 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: netapp_e_facts
+short_description: NetApp E-Series retrieve facts about NetApp E-Series storage arrays
+description:
+ - The netapp_e_facts module returns a collection of facts regarding NetApp E-Series storage arrays.
+version_added: '2.2'
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+'''
+
+EXAMPLES = """
+---
+- name: Get array facts
+ netapp_e_facts:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+"""
+
+RETURN = """
+ msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample:
+ - Gathered facts for storage array. Array ID [1].
+ - Gathered facts for web services proxy.
+ storage_array_facts:
+ description: provides details about the array, controllers, management interfaces, hostside interfaces,
+ driveside interfaces, disks, storage pools, volumes, snapshots, and features.
    returned: on successful inquiry from the embedded web services rest api
+ type: complex
+ contains:
+ netapp_controllers:
+ description: storage array controller list that contains basic controller identification and status
+ type: complex
+ sample:
+ - [{"name": "A", "serial": "021632007299", "status": "optimal"},
+ {"name": "B", "serial": "021632007300", "status": "failed"}]
+ netapp_disks:
+ description: drive list that contains identification, type, and status information for each drive
+ type: complex
+ sample:
+ - [{"available": false,
+ "firmware_version": "MS02",
+ "id": "01000000500003960C8B67880000000000000000",
+ "media_type": "ssd",
+ "product_id": "PX02SMU080 ",
+ "serial_number": "15R0A08LT2BA",
+ "status": "optimal",
+ "tray_ref": "0E00000000000000000000000000000000000000",
+ "usable_bytes": "799629205504" }]
+ netapp_driveside_interfaces:
+ description: drive side interface list that contains identification, type, and speed for each interface
+ type: complex
+ sample:
+ - [{ "controller": "A", "interface_speed": "12g", "interface_type": "sas" }]
+ - [{ "controller": "B", "interface_speed": "10g", "interface_type": "iscsi" }]
+ netapp_enabled_features:
+ description: specifies the enabled features on the storage array.
+ returned: on success
+ type: complex
+ sample:
+ - [ "flashReadCache", "performanceTier", "protectionInformation", "secureVolume" ]
+ netapp_host_groups:
+ description: specifies the host groups on the storage arrays.
+ returned: on success
+ type: complex
+ sample:
+ - [{ "id": "85000000600A098000A4B28D003610705C40B964", "name": "group1" }]
+ netapp_hosts:
+ description: specifies the hosts on the storage arrays.
+ returned: on success
+ type: complex
+ sample:
+ - [{ "id": "8203800000000000000000000000000000000000",
+ "name": "host1",
+ "group_id": "85000000600A098000A4B28D003610705C40B964",
+ "host_type_index": 28,
+ "ports": [{ "type": "fc", "address": "1000FF7CFFFFFF01", "label": "FC_1" },
+ { "type": "fc", "address": "1000FF7CFFFFFF00", "label": "FC_2" }]}]
+ netapp_host_types:
+ description: lists the available host types on the storage array.
+ returned: on success
+ type: complex
+ sample:
+ - [{ "index": 0, "type": "FactoryDefault" },
+ { "index": 1, "type": "W2KNETNCL"},
+ { "index": 2, "type": "SOL" },
+ { "index": 5, "type": "AVT_4M" },
+ { "index": 6, "type": "LNX" },
+ { "index": 7, "type": "LnxALUA" },
+ { "index": 8, "type": "W2KNETCL" },
+ { "index": 9, "type": "AIX MPIO" },
+ { "index": 10, "type": "VmwTPGSALUA" },
+ { "index": 15, "type": "HPXTPGS" },
+ { "index": 17, "type": "SolTPGSALUA" },
+ { "index": 18, "type": "SVC" },
+ { "index": 22, "type": "MacTPGSALUA" },
+ { "index": 23, "type": "WinTPGSALUA" },
+ { "index": 24, "type": "LnxTPGSALUA" },
+ { "index": 25, "type": "LnxTPGSALUA_PM" },
+ { "index": 26, "type": "ONTAP_ALUA" },
+ { "index": 27, "type": "LnxTPGSALUA_SF" },
+ { "index": 28, "type": "LnxDHALUA" },
+ { "index": 29, "type": "ATTOClusterAllOS" }]
+ netapp_hostside_interfaces:
+ description: host side interface list that contains identification, configuration, type, speed, and
+ status information for each interface
+ type: complex
+ sample:
+ - [{"iscsi":
+ [{ "controller": "A",
+ "current_interface_speed": "10g",
+ "ipv4_address": "10.10.10.1",
+ "ipv4_enabled": true,
+ "ipv4_gateway": "10.10.10.1",
+ "ipv4_subnet_mask": "255.255.255.0",
+ "ipv6_enabled": false,
+ "iqn": "iqn.1996-03.com.netapp:2806.600a098000a81b6d0000000059d60c76",
+ "link_status": "up",
+ "mtu": 9000,
+ "supported_interface_speeds": [ "10g" ] }]}]
+ netapp_management_interfaces:
+ description: management interface list that contains identification, configuration, and status for
+ each interface
+ type: complex
+ sample:
+ - [{"alias": "ict-2800-A",
+ "channel": 1,
+ "controller": "A",
+ "dns_config_method": "dhcp",
+ "dns_servers": [],
+ "ipv4_address": "10.1.1.1",
+ "ipv4_address_config_method": "static",
+ "ipv4_enabled": true,
+ "ipv4_gateway": "10.113.1.1",
+ "ipv4_subnet_mask": "255.255.255.0",
+ "ipv6_enabled": false,
+ "link_status": "up",
+ "mac_address": "00A098A81B5D",
+ "name": "wan0",
+ "ntp_config_method": "disabled",
+ "ntp_servers": [],
+ "remote_ssh_access": false }]
+ netapp_storage_array:
+ description: provides storage array identification, firmware version, and available capabilities
+ type: dict
+ sample:
+ - {"chassis_serial": "021540006043",
+ "firmware": "08.40.00.01",
+ "name": "ict-2800-11_40",
+ "wwn": "600A098000A81B5D0000000059D60C76",
+ "cacheBlockSizes": [4096,
+ 8192,
+ 16384,
+ 32768],
+ "supportedSegSizes": [8192,
+ 16384,
+ 32768,
+ 65536,
+ 131072,
+ 262144,
+ 524288]}
+ netapp_storage_pools:
+ description: storage pool list that contains identification and capacity information for each pool
+ type: complex
+ sample:
+ - [{"available_capacity": "3490353782784",
+ "id": "04000000600A098000A81B5D000002B45A953A61",
+ "name": "Raid6",
+ "total_capacity": "5399466745856",
+ "used_capacity": "1909112963072" }]
+ netapp_volumes:
+ description: storage volume list that contains identification and capacity information for each volume
+ type: complex
+ sample:
+ - [{"capacity": "5368709120",
+ "id": "02000000600A098000AAC0C3000002C45A952BAA",
+ "is_thin_provisioned": false,
+ "name": "5G",
+ "parent_storage_pool_id": "04000000600A098000A81B5D000002B45A953A61" }]
+ netapp_workload_tags:
+ description: workload tag list
+ type: complex
+ sample:
+ - [{"id": "87e19568-43fb-4d8d-99ea-2811daaa2b38",
+ "name": "ftp_server",
+ "workloadAttributes": [{"key": "use",
+ "value": "general"}]}]
+ netapp_volumes_by_initiators:
+ description: list of available volumes keyed by the mapped initiators.
+ type: complex
+ sample:
+ - {"192_168_1_1": [{"id": "02000000600A098000A4B9D1000015FD5C8F7F9E",
+ "meta_data": {"filetype": "xfs", "public": true},
+ "name": "some_volume",
+ "workload_name": "test2_volumes",
+ "wwn": "600A098000A4B9D1000015FD5C8F7F9E"}]}
+ snapshot_images:
+ description: snapshot image list that contains identification, capacity, and status information for each
+ snapshot image
+ type: complex
+ sample:
+ - [{"active_cow": true,
+ "creation_method": "user",
+ "id": "34000000600A098000A81B5D00630A965B0535AC",
+ "pit_capacity": "5368709120",
+ "reposity_cap_utilization": "0",
+ "rollback_source": false,
+ "status": "optimal" }]
+"""
+
+from re import match
+from pprint import pformat
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule
+
+
class Facts(NetAppESeriesModule):
    """Gather identification, configuration, and status facts from an E-Series storage array."""

    def __init__(self):
        web_services_version = "02.00.0000.0000"
        super(Facts, self).__init__(ansible_options={},
                                    web_services_version=web_services_version,
                                    supports_check_mode=True)

    def get_controllers(self):
        """Retrieve a mapping of controller references to their labels.

        :return dict: controller reference -> alphabetic label ("A", "B", ...) in sorted reference order.
        """
        controllers = list()
        try:
            rc, controllers = self.request('storage-systems/%s/graph/xpath-filter?query=/controller/id' % self.ssid)
        except Exception as err:
            self.module.fail_json(
                msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
                    % (self.ssid, str(err)))

        controllers.sort()

        # Label controllers "A", "B", ... following the sorted reference order.
        controllers_dict = {}
        i = ord('A')
        for controller in controllers:
            label = chr(i)
            controllers_dict[controller] = label
            i += 1

        return controllers_dict

    def get_array_facts(self):
        """Extract particular facts from the storage array graph"""
        facts = dict(facts_from_proxy=(not self.is_embedded()), ssid=self.ssid)
        controller_reference_label = self.get_controllers()
        array_facts = None

        # Get the storage array graph
        try:
            rc, array_facts = self.request("storage-systems/%s/graph" % self.ssid)
        except Exception as error:
            self.module.fail_json(msg="Failed to obtain facts from storage array with id [%s]. Error [%s]" % (self.ssid, str(error)))

        facts['netapp_storage_array'] = dict(
            name=array_facts['sa']['saData']['storageArrayLabel'],
            chassis_serial=array_facts['sa']['saData']['chassisSerialNumber'],
            firmware=array_facts['sa']['saData']['fwVersion'],
            wwn=array_facts['sa']['saData']['saId']['worldWideName'],
            segment_sizes=array_facts['sa']['featureParameters']['supportedSegSizes'],
            cache_block_sizes=array_facts['sa']['featureParameters']['cacheBlockSizes'])

        facts['netapp_controllers'] = [
            dict(
                name=controller_reference_label[controller['controllerRef']],
                serial=controller['serialNumber'].strip(),
                status=controller['status'],
            ) for controller in array_facts['controller']]

        facts['netapp_host_groups'] = [
            dict(
                id=group['id'],
                name=group['name']
            ) for group in array_facts['storagePoolBundle']['cluster']]

        # NOTE(review): the key below is spelled "posts" but carries hostSidePorts
        # (the RETURN documentation calls it "ports"); renaming the key would
        # break existing consumers, so it is left as-is and only documented here.
        facts['netapp_hosts'] = [
            dict(
                group_id=host['clusterRef'],
                hosts_reference=host['hostRef'],
                id=host['id'],
                name=host['name'],
                host_type_index=host['hostTypeIndex'],
                posts=host['hostSidePorts']
            ) for host in array_facts['storagePoolBundle']['host']]

        facts['netapp_host_types'] = [
            dict(
                type=host_type['hostType'],
                index=host_type['index']
            ) for host_type in array_facts['sa']['hostSpecificVals']
            if 'hostType' in host_type.keys() and host_type['hostType']
            # This conditional ignores zero-length strings which indicates that the associated host-specific NVSRAM region has been cleared.
        ]
        # NOTE: 'reposity_cap_utilization' misspelling is an established output
        # key (matches the module's RETURN documentation); do not rename.
        facts['snapshot_images'] = [
            dict(
                id=snapshot['id'],
                status=snapshot['status'],
                pit_capacity=snapshot['pitCapacity'],
                creation_method=snapshot['creationMethod'],
                reposity_cap_utilization=snapshot['repositoryCapacityUtilization'],
                active_cow=snapshot['activeCOW'],
                rollback_source=snapshot['isRollbackSource']
            ) for snapshot in array_facts['highLevelVolBundle']['pit']]

        facts['netapp_disks'] = [
            dict(
                id=disk['id'],
                available=disk['available'],
                media_type=disk['driveMediaType'],
                status=disk['status'],
                usable_bytes=disk['usableCapacity'],
                tray_ref=disk['physicalLocation']['trayRef'],
                product_id=disk['productID'],
                firmware_version=disk['firmwareVersion'],
                serial_number=disk['serialNumber'].lstrip()
            ) for disk in array_facts['drive']]

        facts['netapp_management_interfaces'] = [
            dict(controller=controller_reference_label[controller['controllerRef']],
                 name=iface['ethernet']['interfaceName'],
                 alias=iface['ethernet']['alias'],
                 channel=iface['ethernet']['channel'],
                 mac_address=iface['ethernet']['macAddr'],
                 remote_ssh_access=iface['ethernet']['rloginEnabled'],
                 link_status=iface['ethernet']['linkStatus'],
                 ipv4_enabled=iface['ethernet']['ipv4Enabled'],
                 ipv4_address_config_method=iface['ethernet']['ipv4AddressConfigMethod'].lower().replace("config", ""),
                 ipv4_address=iface['ethernet']['ipv4Address'],
                 ipv4_subnet_mask=iface['ethernet']['ipv4SubnetMask'],
                 ipv4_gateway=iface['ethernet']['ipv4GatewayAddress'],
                 ipv6_enabled=iface['ethernet']['ipv6Enabled'],
                 dns_config_method=iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsAcquisitionType'],
                 dns_servers=(iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers']
                              if iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers'] else []),
                 ntp_config_method=iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpAcquisitionType'],
                 ntp_servers=(iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers']
                              if iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers'] else [])
                 ) for controller in array_facts['controller'] for iface in controller['netInterfaces']]

        # One entry per interface type; each value is a list of interfaces of
        # that type across both controllers.
        facts['netapp_hostside_interfaces'] = [
            dict(
                fc=[dict(controller=controller_reference_label[controller['controllerRef']],
                         channel=iface['fibre']['channel'],
                         link_status=iface['fibre']['linkStatus'],
                         current_interface_speed=strip_interface_speed(iface['fibre']['currentInterfaceSpeed']),
                         maximum_interface_speed=strip_interface_speed(iface['fibre']['maximumInterfaceSpeed']))
                    for controller in array_facts['controller']
                    for iface in controller['hostInterfaces']
                    if iface['interfaceType'] == 'fc'],
                ib=[dict(controller=controller_reference_label[controller['controllerRef']],
                         channel=iface['ib']['channel'],
                         link_status=iface['ib']['linkState'],
                         mtu=iface['ib']['maximumTransmissionUnit'],
                         current_interface_speed=strip_interface_speed(iface['ib']['currentSpeed']),
                         maximum_interface_speed=strip_interface_speed(iface['ib']['supportedSpeed']))
                    for controller in array_facts['controller']
                    for iface in controller['hostInterfaces']
                    if iface['interfaceType'] == 'ib'],
                iscsi=[dict(controller=controller_reference_label[controller['controllerRef']],
                            iqn=iface['iscsi']['iqn'],
                            link_status=iface['iscsi']['interfaceData']['ethernetData']['linkStatus'],
                            ipv4_enabled=iface['iscsi']['ipv4Enabled'],
                            ipv4_address=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4Address'],
                            ipv4_subnet_mask=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4SubnetMask'],
                            ipv4_gateway=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4GatewayAddress'],
                            ipv6_enabled=iface['iscsi']['ipv6Enabled'],
                            mtu=iface['iscsi']['interfaceData']['ethernetData']['maximumFramePayloadSize'],
                            current_interface_speed=strip_interface_speed(iface['iscsi']['interfaceData']
                                                                          ['ethernetData']['currentInterfaceSpeed']),
                            supported_interface_speeds=strip_interface_speed(iface['iscsi']['interfaceData']
                                                                             ['ethernetData']
                                                                             ['supportedInterfaceSpeeds']))
                       for controller in array_facts['controller']
                       for iface in controller['hostInterfaces']
                       if iface['interfaceType'] == 'iscsi'],
                sas=[dict(controller=controller_reference_label[controller['controllerRef']],
                          channel=iface['sas']['channel'],
                          current_interface_speed=strip_interface_speed(iface['sas']['currentInterfaceSpeed']),
                          maximum_interface_speed=strip_interface_speed(iface['sas']['maximumInterfaceSpeed']),
                          link_status=iface['sas']['iocPort']['state'])
                     for controller in array_facts['controller']
                     for iface in controller['hostInterfaces']
                     if iface['interfaceType'] == 'sas'])]

        # The speed attribute name differs per drive-side interface type, hence
        # the nested conditional expression selecting the proper graph field.
        facts['netapp_driveside_interfaces'] = [
            dict(
                controller=controller_reference_label[controller['controllerRef']],
                interface_type=interface['interfaceType'],
                interface_speed=strip_interface_speed(
                    interface[interface['interfaceType']]['maximumInterfaceSpeed']
                    if (interface['interfaceType'] == 'sata' or
                        interface['interfaceType'] == 'sas' or
                        interface['interfaceType'] == 'fibre')
                    else (
                        interface[interface['interfaceType']]['currentSpeed']
                        if interface['interfaceType'] == 'ib'
                        else (
                            interface[interface['interfaceType']]['interfaceData']['maximumInterfaceSpeed']
                            if interface['interfaceType'] == 'iscsi' else 'unknown'
                        ))),
            )
            for controller in array_facts['controller']
            for interface in controller['driveInterfaces']]

        facts['netapp_storage_pools'] = [
            dict(
                id=storage_pool['id'],
                name=storage_pool['name'],
                available_capacity=storage_pool['freeSpace'],
                total_capacity=storage_pool['totalRaidedSpace'],
                used_capacity=storage_pool['usedSpace']
            ) for storage_pool in array_facts['volumeGroup']]

        all_volumes = list(array_facts['volume'])

        facts['netapp_volumes'] = [
            dict(
                id=v['id'],
                name=v['name'],
                parent_storage_pool_id=v['volumeGroupRef'],
                capacity=v['capacity'],
                is_thin_provisioned=v['thinProvisioned'],
                workload=v['metadata'],
            ) for v in all_volumes]

        workload_tags = None
        try:
            rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid)
        except Exception as error:
            self.module.fail_json(msg="Failed to retrieve workload tags. Array [%s]." % self.ssid)

        facts['netapp_workload_tags'] = [
            dict(
                id=workload_tag['id'],
                name=workload_tag['name'],
                attributes=workload_tag['workloadAttributes']
            ) for workload_tag in workload_tags]

        # Create a dictionary of volume lists keyed by host names
        facts['netapp_volumes_by_initiators'] = dict()
        for mapping in array_facts['storagePoolBundle']['lunMapping']:
            for host in facts['netapp_hosts']:
                # A mapping may target either a single host or a host group.
                if mapping['mapRef'] == host['hosts_reference'] or mapping['mapRef'] == host['group_id']:
                    if host['name'] not in facts['netapp_volumes_by_initiators'].keys():
                        facts['netapp_volumes_by_initiators'].update({host['name']: []})

                    for volume in all_volumes:
                        if mapping['id'] in [volume_mapping['id'] for volume_mapping in volume['listOfMappings']]:

                            # Determine workload name if there is one
                            workload_name = ""
                            metadata = dict()
                            for volume_tag in volume['metadata']:
                                if volume_tag['key'] == 'workloadId':
                                    for workload_tag in facts['netapp_workload_tags']:
                                        if volume_tag['value'] == workload_tag['id']:
                                            workload_name = workload_tag['name']
                                            metadata = dict((entry['key'], entry['value'])
                                                            for entry in workload_tag['attributes']
                                                            if entry['key'] != 'profileId')

                            facts['netapp_volumes_by_initiators'][host['name']].append(
                                dict(name=volume['name'],
                                     id=volume['id'],
                                     wwn=volume['wwn'],
                                     workload_name=workload_name,
                                     meta_data=metadata))

        # Base capabilities plus any enabled premium features, de-duplicated.
        features = [feature for feature in array_facts['sa']['capabilities']]
        features.extend([feature['capability'] for feature in array_facts['sa']['premiumFeatures']
                         if feature['isEnabled']])
        features = list(set(features))  # ensure unique
        features.sort()
        facts['netapp_enabled_features'] = features

        return facts

    def get_facts(self):
        """Get the embedded or web services proxy information."""
        facts = self.get_array_facts()

        self.module.log("isEmbedded: %s" % self.is_embedded())
        self.module.log(pformat(facts))

        self.module.exit_json(msg="Gathered facts for storage array. Array ID: [%s]." % self.ssid,
                              storage_array_facts=facts)
+
+
def strip_interface_speed(speed):
    """Converts symbol interface speeds to a more common notation. Example: 'speed10gig' -> '10g'

    :param speed: symbol speed string (e.g. "speed10gig") or a list of such strings.
    :return: simplified speed ("10g", "auto", or "unknown"), or a list thereof.
    """
    if isinstance(speed, list):
        # Map every entry; entries that do not match the speed pattern become
        # "unknown" instead of being silently dropped (the original comprehension
        # filtered them out, leaving the result shorter than the input and making
        # its `else "unknown"` branch dead code — inconsistent with the scalar path).
        result = []
        for sp in speed:
            matched = match(r"speed[0-9]{1,3}[gm]", sp)
            simplified = matched.group().replace("speed", "") if matched else "unknown"
            result.append("auto" if match(r"auto", simplified) else simplified)
    else:
        matched = match(r"speed[0-9]{1,3}[gm]", speed)
        result = matched.group().replace("speed", "") if matched else "unknown"
        result = "auto" if match(r"auto", result.lower()) else result
    return result
+
+
def main():
    # Entry point: construct the module object (parses Ansible arguments) and
    # gather facts; get_facts() exits the module via exit_json().
    facts = Facts()
    facts.get_facts()


if __name__ == "__main__":
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_firmware.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_firmware.py
new file mode 100644
index 00000000..c2f7f745
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_firmware.py
@@ -0,0 +1,488 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_firmware
+version_added: "2.9"
+short_description: NetApp E-Series manage firmware.
+description:
+ - Ensure specific firmware versions are activated on E-Series storage system.
+author:
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ nvsram:
+ description:
+ - Path to the NVSRAM file.
+ type: str
+ required: true
+ firmware:
+ description:
+ - Path to the firmware file.
+ type: str
+ required: true
+ wait_for_completion:
+ description:
+ - This flag will cause module to wait for any upgrade actions to complete.
+ type: bool
+ default: false
+ ignore_health_check:
+ description:
+ - This flag will force firmware to be activated in spite of the health check.
+ - Use at your own risk. Certain non-optimal states could result in data loss.
+ type: bool
+ default: false
+"""
+EXAMPLES = """
+- name: Ensure correct firmware versions
+ netapp_e_firmware:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ nvsram: "path/to/nvsram"
    firmware: "path/to/firmware"
+ wait_for_completion: true
+- name: Ensure correct firmware versions
+ netapp_e_firmware:
+ ssid: "1"
+ api_url: "https://192.168.1.100:8443/devmgr/v2"
+ api_username: "admin"
+ api_password: "adminpass"
+ validate_certs: true
+ nvsram: "path/to/nvsram"
+ firmware: "path/to/firmware"
+"""
+RETURN = """
+msg:
+ description: Status and version of firmware and NVSRAM.
+ type: str
+ returned: always
+ sample:
+"""
+import os
+
+from time import sleep
+from ansible.module_utils import six
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule, create_multipart_formdata, request
+from ansible.module_utils._text import to_native, to_text, to_bytes
+
+
class NetAppESeriesFirmware(NetAppESeriesModule):
    """Ensure specific controller firmware and NVSRAM versions are activated on an E-Series storage system."""

    # Maximum time a health check may run before the module fails (milliseconds).
    HEALTH_CHECK_TIMEOUT_MS = 120000
    # How long to poll for web services to return after a controller reboot (seconds).
    REBOOT_TIMEOUT_SEC = 15 * 60
    # Maximum time to wait for the proxy firmware compatibility check (seconds).
    FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC = 60
    DEFAULT_TIMEOUT = 60 * 15  # This will override the NetAppESeriesModule request method timeout.
+
    def __init__(self):
        """Declare module arguments and initialize upgrade-state bookkeeping."""
        ansible_options = dict(
            nvsram=dict(type="str", required=True),
            firmware=dict(type="str", required=True),
            wait_for_completion=dict(type="bool", default=False),
            ignore_health_check=dict(type="bool", default=False))

        super(NetAppESeriesFirmware, self).__init__(ansible_options=ansible_options,
                                                    web_services_version="02.00.0000.0000",
                                                    supports_check_mode=True)

        args = self.module.params
        self.nvsram = args["nvsram"]              # path to the NVSRAM file
        self.firmware = args["firmware"]          # path to the firmware (or bundle) file
        self.wait_for_completion = args["wait_for_completion"]
        self.ignore_health_check = args["ignore_health_check"]

        # Lazily-populated caches; None means "not yet determined".
        # (nvsram_name/firmware_name are overwritten just below — the None
        # assignments here are redundant but harmless.)
        self.nvsram_name = None
        self.firmware_name = None
        self.is_bundle_cache = None
        self.firmware_version_cache = None
        self.nvsram_version_cache = None
        self.upgrade_required = False
        self.upgrade_in_progress = False
        self.module_info = dict()

        self.nvsram_name = os.path.basename(self.nvsram)
        self.firmware_name = os.path.basename(self.firmware)
+
+ def is_firmware_bundled(self):
+ """Determine whether supplied firmware is bundle."""
+ if self.is_bundle_cache is None:
+ with open(self.firmware, "rb") as fh:
+ signature = fh.read(16).lower()
+
+ if b"firmware" in signature:
+ self.is_bundle_cache = False
+ elif b"combined_content" in signature:
+ self.is_bundle_cache = True
+ else:
+ self.module.fail_json(msg="Firmware file is invalid. File [%s]. Array [%s]" % (self.firmware, self.ssid))
+
+ return self.is_bundle_cache
+
    def firmware_version(self):
        """Retrieve firmware version of the firmware file. Return: bytes string"""
        if self.firmware_version_cache is None:

            # Search firmware file for bundle or firmware version
            with open(self.firmware, "rb") as fh:
                line = fh.readline()
                while line:
                    if self.is_firmware_bundled():
                        # Bundle format: version is the VERSION field of a
                        # pipe-delimited 'displayableAttributeList=' line.
                        if b'displayableAttributeList=' in line:
                            for item in line[25:].split(b','):
                                key, value = item.split(b"|")
                                if key == b'VERSION':
                                    self.firmware_version_cache = value.strip(b"\n")
                            break
                    elif b"Version:" in line:
                        # Plain firmware format: last token of the 'Version:' line.
                        self.firmware_version_cache = line.split()[-1].strip(b"\n")
                        break
                    line = fh.readline()
                else:
                    # while/else: reached EOF without a break, i.e. no version found.
                    self.module.fail_json(msg="Failed to determine firmware version. File [%s]. Array [%s]." % (self.firmware, self.ssid))
        return self.firmware_version_cache
+
+ def nvsram_version(self):
+ """Retrieve NVSRAM version of the NVSRAM file. Return: byte string"""
+ if self.nvsram_version_cache is None:
+
+ with open(self.nvsram, "rb") as fh:
+ line = fh.readline()
+ while line:
+ if b".NVSRAM Configuration Number" in line:
+ self.nvsram_version_cache = line.split(b'"')[-2]
+ break
+ line = fh.readline()
+ else:
+ self.module.fail_json(msg="Failed to determine NVSRAM file version. File [%s]. Array [%s]." % (self.nvsram, self.ssid))
+ return self.nvsram_version_cache
+
+ def check_system_health(self):
+ """Ensure E-Series storage system is healthy. Works for both embedded and proxy web services."""
+ try:
+ rc, request_id = self.request("health-check", method="POST", data={"onlineOnly": True, "storageDeviceIds": [self.ssid]})
+
+ while True:
+ sleep(1)
+
+ try:
+ rc, response = self.request("health-check?requestId=%s" % request_id["requestId"])
+
+ if not response["healthCheckRunning"]:
+ return response["results"][0]["successful"]
+ elif int(response["results"][0]["processingTimeMS"]) > self.HEALTH_CHECK_TIMEOUT_MS:
+ self.module.fail_json(msg="Health check failed to complete. Array Id [%s]." % self.ssid)
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve health check status. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to initiate health check. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ self.module.fail_json(msg="Failed to retrieve health check status. Array Id [%s]. Error[%s]." % self.ssid)
+
    def embedded_check_compatibility(self):
        """Verify files are compatible with E-Series storage system."""
        # Both checks fail the module on incompatibility and set
        # self.upgrade_required / self.module_info as side effects.
        self.embedded_check_nvsram_compatibility()
        self.embedded_check_bundle_compatibility()
+
    def embedded_check_nvsram_compatibility(self):
        """Verify the provided NVSRAM is compatible with E-Series storage system.

        Uploads the NVSRAM file to the embedded compatibility-check endpoint.
        Fails the module if the file is invalid or incompatible; otherwise sets
        self.upgrade_required when any module version differs and records
        onboard/bundled versions in self.module_info.
        """

        # Check nvsram compatibility
        try:
            files = [("nvsramimage", self.nvsram_name, self.nvsram)]
            headers, data = create_multipart_formdata(files=files)

            rc, nvsram_compatible = self.request("firmware/embedded-firmware/%s/nvsram-compatibility-check" % self.ssid,
                                                 method="POST", data=data, headers=headers)

            if not nvsram_compatible["signatureTestingPassed"]:
                self.module.fail_json(msg="Invalid NVSRAM file. File [%s]." % self.nvsram)
            if not nvsram_compatible["fileCompatible"]:
                self.module.fail_json(msg="Incompatible NVSRAM file. File [%s]." % self.nvsram)

            # Determine whether nvsram is required
            for module in nvsram_compatible["versionContents"]:
                if module["bundledVersion"] != module["onboardVersion"]:
                    self.upgrade_required = True

                # Update bundle info
                self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}})

        except Exception as error:
            self.module.fail_json(msg="Failed to retrieve NVSRAM compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+ def embedded_check_bundle_compatibility(self):
+ """Verify the provided firmware bundle is compatible with E-Series storage system."""
+ try:
+ files = [("files[]", "blob", self.firmware)]
+ headers, data = create_multipart_formdata(files=files, send_8kb=True)
+ rc, bundle_compatible = self.request("firmware/embedded-firmware/%s/bundle-compatibility-check" % self.ssid,
+ method="POST", data=data, headers=headers)
+
+ # Determine whether valid and compatible firmware
+ if not bundle_compatible["signatureTestingPassed"]:
+ self.module.fail_json(msg="Invalid firmware bundle file. File [%s]." % self.firmware)
+ if not bundle_compatible["fileCompatible"]:
+ self.module.fail_json(msg="Incompatible firmware bundle file. File [%s]." % self.firmware)
+
+ # Determine whether upgrade is required
+ for module in bundle_compatible["versionContents"]:
+
+ bundle_module_version = module["bundledVersion"].split(".")
+ onboard_module_version = module["onboardVersion"].split(".")
+ version_minimum_length = min(len(bundle_module_version), len(onboard_module_version))
+ if bundle_module_version[:version_minimum_length] != onboard_module_version[:version_minimum_length]:
+ self.upgrade_required = True
+
+ # Check whether downgrade is being attempted
+ bundle_version = module["bundledVersion"].split(".")[:2]
+ onboard_version = module["onboardVersion"].split(".")[:2]
+ if bundle_version[0] < onboard_version[0] or (bundle_version[0] == onboard_version[0] and bundle_version[1] < onboard_version[1]):
+ self.module.fail_json(msg="Downgrades are not permitted. onboard [%s] > bundled[%s]."
+ % (module["onboardVersion"], module["bundledVersion"]))
+
+ # Update bundle info
+ self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}})
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve bundle compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
    def embedded_wait_for_upgrade(self):
        """Wait for SANtricity Web Services Embedded to be available after reboot."""
        # Poll once per second up to REBOOT_TIMEOUT_SEC; success requires the
        # reported bundle and NVSRAM versions to match the supplied files.
        for count in range(0, self.REBOOT_TIMEOUT_SEC):
            try:
                rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData" % self.ssid)
                bundle_display = [m["versionString"] for m in response[0]["extendedSAData"]["codeVersions"] if m["codeModule"] == "bundleDisplay"][0]
                if rc == 200 and six.b(bundle_display) == self.firmware_version() and six.b(response[0]["nvsramVersion"]) == self.nvsram_version():
                    self.upgrade_in_progress = False
                    break
            except Exception as error:
                # Expected while the controller is rebooting: web services is
                # unreachable or returns partial data, so keep polling.
                pass
            sleep(1)
        else:
            # for/else: loop exhausted without a break, i.e. timed out.
            self.module.fail_json(msg="Timeout waiting for Santricity Web Services Embedded. Array [%s]" % self.ssid)
+
    def embedded_upgrade(self):
        """Upload and activate both firmware and NVSRAM."""
        # staged=false activates immediately; nvsram=true applies the NVSRAM too.
        files = [("nvsramfile", self.nvsram_name, self.nvsram),
                 ("dlpfile", self.firmware_name, self.firmware)]
        headers, data = create_multipart_formdata(files=files)
        try:
            rc, response = self.request("firmware/embedded-firmware?staged=false&nvsram=true", method="POST", data=data, headers=headers)
            self.upgrade_in_progress = True
        except Exception as error:
            self.module.fail_json(msg="Failed to upload and activate firmware. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
        if self.wait_for_completion:
            self.embedded_wait_for_upgrade()
+
    def proxy_check_nvsram_compatibility(self):
        """Verify nvsram is compatible with E-Series storage system.

        Starts a proxy compatibility check and polls every 5 seconds; returns
        as soon as the uploaded NVSRAM file appears in the compatible list,
        otherwise fails the module.
        """
        data = {"storageDeviceIds": [self.ssid]}
        try:
            rc, check = self.request("firmware/compatibility-check", method="POST", data=data)
            for count in range(0, int((self.FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC / 5))):
                sleep(5)
                try:
                    rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"])
                    if not response["checkRunning"]:
                        for result in response["results"][0]["nvsramFiles"]:
                            if result["filename"] == self.nvsram_name:
                                return
                        self.module.fail_json(msg="NVSRAM is not compatible. NVSRAM [%s]. Array [%s]." % (self.nvsram_name, self.ssid))
                except Exception as error:
                    self.module.fail_json(msg="Failed to retrieve NVSRAM status update from proxy. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
            # NOTE(review): if the check is still running when the polling loop
            # is exhausted, this falls through and returns without failing —
            # the timeout is silently treated as success. Confirm intent.
        except Exception as error:
            self.module.fail_json(msg="Failed to receive NVSRAM compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def proxy_check_firmware_compatibility(self):
+ """Verify firmware is compatible with E-Series storage system."""
+ data = {"storageDeviceIds": [self.ssid]}
+ try:
+ rc, check = self.request("firmware/compatibility-check", method="POST", data=data)
+ for count in range(0, int((self.FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC / 5))):
+ sleep(5)
+ try:
+ rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"])
+ if not response["checkRunning"]:
+ for result in response["results"][0]["cfwFiles"]:
+ if result["filename"] == self.firmware_name:
+ return
+ self.module.fail_json(msg="Firmware bundle is not compatible. firmware [%s]. Array [%s]." % (self.firmware_name, self.ssid))
+
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve firmware status update from proxy. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to receive firmware compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ def proxy_upload_and_check_compatibility(self):
+ """Ensure firmware is uploaded and verify compatibility."""
+ try:
+ rc, cfw_files = self.request("firmware/cfw-files")
+ for file in cfw_files:
+ if file["filename"] == self.nvsram_name:
+ break
+ else:
+ fields = [("validate", "true")]
+ files = [("firmwareFile", self.nvsram_name, self.nvsram)]
+ headers, data = create_multipart_formdata(files=files, fields=fields)
+ try:
+ rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to upload NVSRAM file. File [%s]. Array [%s]. Error [%s]."
+ % (self.nvsram_name, self.ssid, to_native(error)))
+
+ self.proxy_check_nvsram_compatibility()
+
+ for file in cfw_files:
+ if file["filename"] == self.firmware_name:
+ break
+ else:
+ fields = [("validate", "true")]
+ files = [("firmwareFile", self.firmware_name, self.firmware)]
+ headers, data = create_multipart_formdata(files=files, fields=fields)
+ try:
+ rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to upload firmware bundle file. File [%s]. Array [%s]. Error [%s]."
+ % (self.firmware_name, self.ssid, to_native(error)))
+
+ self.proxy_check_firmware_compatibility()
+ except Exception as error:
+ self.module.fail_json(msg="Failed to retrieve existing existing firmware files. Error [%s]" % to_native(error))
+
    def proxy_check_upgrade_required(self):
        """Staging is required to collect firmware information from the web services proxy.

        Sets self.upgrade_required when either the controller firmware or the
        NVSRAM version differs from the supplied files; fails the module on a
        downgrade attempt or when version information cannot be retrieved.
        """
        # Verify controller consistency and get firmware versions
        try:
            # Retrieve current bundle version
            if self.is_firmware_bundled():
                # Bundled firmware reports its version through the controller's bundleDisplay code module.
                rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/codeVersions[codeModule='bundleDisplay']" % self.ssid)
                current_firmware_version = six.b(response[0]["versionString"])
            else:
                rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion" % self.ssid)
                current_firmware_version = six.b(response[0])

            # Determine whether upgrade is required
            if current_firmware_version != self.firmware_version():

                # Compare only the major and minor components; allow equal-or-newer.
                current = current_firmware_version.split(b".")[:2]
                upgrade = self.firmware_version().split(b".")[:2]
                # NOTE(review): components are compared as byte strings, i.e.
                # lexicographically (b"9" > b"10") — confirm version fields
                # cannot reach differing digit counts before relying on this
                # downgrade guard.
                if current[0] < upgrade[0] or (current[0] == upgrade[0] and current[1] <= upgrade[1]):
                    self.upgrade_required = True
                else:
                    self.module.fail_json(msg="Downgrades are not permitted. Firmware [%s]. Array [%s]." % (self.firmware, self.ssid))
        except Exception as error:
            self.module.fail_json(msg="Failed to retrieve controller firmware information. Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
        # Determine current NVSRAM version and whether change is required
        try:
            rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid)
            if six.b(response[0]) != self.nvsram_version():
                self.upgrade_required = True

        except Exception as error:
            self.module.fail_json(msg="Failed to retrieve storage system's NVSRAM version. Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
+
    def proxy_wait_for_upgrade(self, request_id):
        """Wait for SANtricity Web Services Proxy to report upgrade complete

        :param request_id: identifier returned when the upgrade was submitted;
            only consulted on the bundled-firmware (batch job) path.
        """
        if self.is_firmware_bundled():
            # Bundled firmware: poll the batch job until it reaches a terminal state.
            while True:
                try:
                    sleep(5)
                    rc, response = self.request("batch/cfw-upgrade/%s" % request_id)

                    if response["status"] == "complete":
                        self.upgrade_in_progress = False
                        break
                    elif response["status"] in ["failed", "cancelled"]:
                        self.module.fail_json(msg="Firmware upgrade failed to complete. Array [%s]." % self.ssid)
                except Exception as error:
                    self.module.fail_json(msg="Failed to retrieve firmware upgrade status. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
        else:
            # Non-bundled firmware: poll the reported firmware/NVSRAM versions
            # until they match the uploaded files, failing after REBOOT_TIMEOUT_SEC.
            for count in range(0, int(self.REBOOT_TIMEOUT_SEC / 5)):
                try:
                    sleep(5)
                    rc_firmware, firmware = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion" % self.ssid)
                    rc_nvsram, nvsram = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid)

                    if six.b(firmware[0]) == self.firmware_version() and six.b(nvsram[0]) == self.nvsram_version():
                        self.upgrade_in_progress = False
                        break
                except Exception as error:
                    # Transient request failures are expected while the controller reboots.
                    pass
            else:
                # for/else: loop exhausted without the versions ever matching.
                self.module.fail_json(msg="Timed out waiting for firmware upgrade to complete. Array [%s]." % self.ssid)
+
+ def proxy_upgrade(self):
+ """Activate previously uploaded firmware related files."""
+ request_id = None
+ if self.is_firmware_bundled():
+ data = {"activate": True,
+ "firmwareFile": self.firmware_name,
+ "nvsramFile": self.nvsram_name,
+ "systemInfos": [{"systemId": self.ssid,
+ "allowNonOptimalActivation": self.ignore_health_check}]}
+ try:
+ rc, response = self.request("batch/cfw-upgrade", method="POST", data=data)
+ request_id = response["requestId"]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to initiate firmware upgrade. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ else:
+ data = {"stageFirmware": False,
+ "skipMelCheck": self.ignore_health_check,
+ "cfwFile": self.firmware_name,
+ "nvsramFile": self.nvsram_name}
+ try:
+ rc, response = self.request("storage-systems/%s/cfw-upgrade" % self.ssid, method="POST", data=data)
+ request_id = response["requestId"]
+ except Exception as error:
+ self.module.fail_json(msg="Failed to initiate firmware upgrade. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+ self.upgrade_in_progress = True
+ if self.wait_for_completion:
+ self.proxy_wait_for_upgrade(request_id)
+
    def apply(self):
        """Upgrade controller firmware.

        Determines whether an upgrade is required, uploads/validates files,
        performs the upgrade (unless in check mode), and exits the module.
        """
        self.check_system_health()

        # Verify firmware compatibility and whether changes are required
        if self.is_embedded():
            self.embedded_check_compatibility()
        else:
            self.proxy_check_upgrade_required()

        # This will upload the firmware files to the web services proxy but not to the controller
        # NOTE(review): this runs for embedded systems too whenever an upgrade is
        # required — confirm the embedded REST API serves the proxy-style
        # firmware/cfw-files endpoints, otherwise this call belongs on the
        # non-embedded branch only.
        if self.upgrade_required:
            self.proxy_upload_and_check_compatibility()

        # Perform upgrade
        if self.upgrade_required and not self.module.check_mode:
            if self.is_embedded():
                self.embedded_upgrade()
            else:
                self.proxy_upgrade()

        self.module.exit_json(changed=self.upgrade_required, upgrade_in_process=self.upgrade_in_progress, status=self.module_info)
+
+
def main():
    """Module entry point: instantiate the firmware manager and apply changes."""
    NetAppESeriesFirmware().apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_flashcache.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_flashcache.py
new file mode 100644
index 00000000..3ffacedd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_flashcache.py
@@ -0,0 +1,442 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+module: netapp_e_flashcache
+author: Kevin Hulquest (@hulquest)
+version_added: '2.2'
+short_description: NetApp E-Series manage SSD caches
+description:
+- Create or remove SSD caches on a NetApp E-Series storage array.
+options:
+ api_username:
+ required: true
+ type: str
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_password:
+ required: true
+ type: str
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ api_url:
+ required: true
+ type: str
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ ssid:
+ required: true
+ type: str
+ description:
+ - The ID of the array to manage (as configured on the web services proxy).
+ state:
+ required: true
+ type: str
+ description:
+ - Whether the specified SSD cache should exist or not.
+ choices: ['present', 'absent']
+ default: present
+ name:
+ required: true
+ type: str
+ description:
+ - The name of the SSD cache to manage
+ io_type:
+ description:
+ - The type of workload to optimize the cache for.
+ choices: ['filesystem','database','media']
+ default: filesystem
+ type: str
+ disk_count:
+ type: int
+ description:
+ - The minimum number of disks to use for building the cache. The cache will be expanded if this number exceeds the number of disks already in place
+ disk_refs:
+ description:
+ - List of disk references
+ type: list
+ size_unit:
+ description:
+ - The unit to be applied to size arguments
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ default: gb
+ type: str
+ cache_size_min:
+ description:
+ - The minimum size (in size_units) of the ssd cache. The cache will be expanded if this exceeds the current size of the cache.
+ type: int
+ criteria_disk_phy_type:
+ description:
+ - Type of physical disk
+ choices: ['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata']
+ type: str
+ log_mode:
+ type: str
+ description:
+ - Log mode
+ log_path:
+ type: str
+ description:
+ - Log path
+'''
+
+EXAMPLES = """
+ - name: Flash Cache
+ netapp_e_flashcache:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ name: SSDCacheBuiltByAnsible
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: str
+ sample: json for newly created flash cache
+"""
+import json
+import logging
+import sys
+import traceback
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import reduce
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+
+
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP(S) request and return ``(status_code, parsed_json_or_None)``.

    An HTTP error status is not raised immediately; its body is read from the
    error response.  Raises Exception when the body is not valid JSON or when
    the status is >= 400, unless ``ignore_errors`` is set.
    """
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError as err:
        # Read the error body like a normal response so callers see rc + payload.
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            # Bug fix: an empty response body previously left `data` bound to the
            # *request* body argument, which was then returned as response data.
            raw_data = None
            data = None
    except Exception:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data
+
+
class NetAppESeriesFlashCache(object):
    """Create, resize, reconfigure, or delete an SSD read cache (flash cache) on an E-Series array."""

    def __init__(self):
        # Attribute defaults; every one of these is overwritten from the module
        # parameters by the self.__dict__.update() call further below.
        self.name = None
        self.log_mode = None
        self.log_path = None
        self.api_url = None
        self.api_username = None
        self.api_password = None
        self.ssid = None
        self.validate_certs = None
        self.disk_count = None
        self.size_unit = None
        self.cache_size_min = None
        self.io_type = None
        self.driveRefs = None
        self.state = None
        # Multipliers used to convert cache_size_min (in size_unit) to bytes.
        self._size_unit_map = dict(
            bytes=1,
            b=1,
            kb=1024,
            mb=1024 ** 2,
            gb=1024 ** 3,
            tb=1024 ** 4,
            pb=1024 ** 5,
            eb=1024 ** 6,
            zb=1024 ** 7,
            yb=1024 ** 8
        )

        argument_spec = basic_auth_argument_spec()
        argument_spec.update(dict(
            api_username=dict(type='str', required=True),
            api_password=dict(type='str', required=True, no_log=True),
            api_url=dict(type='str', required=True),
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            ssid=dict(required=True, type='str'),
            name=dict(required=True, type='str'),
            disk_count=dict(type='int'),
            disk_refs=dict(type='list'),
            cache_size_min=dict(type='int'),
            io_type=dict(default='filesystem', choices=['filesystem', 'database', 'media']),
            size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'],
                           type='str'),
            criteria_disk_phy_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'],
                                        type='str'),
            log_mode=dict(type='str'),
            log_path=dict(type='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=argument_spec,
            required_if=[

            ],
            mutually_exclusive=[

            ],
            # TODO: update validation for various selection criteria
            supports_check_mode=True
        )

        # Copy every module parameter onto the instance as an attribute.
        self.__dict__.update(self.module.params)

        # logging setup
        self._logger = logging.getLogger(self.__class__.__name__)
        self.debug = self._logger.debug

        if self.log_mode == 'file' and self.log_path:
            logging.basicConfig(level=logging.DEBUG, filename=self.log_path)
        elif self.log_mode == 'stderr':
            logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)

        self.post_headers = dict(Accept="application/json")
        self.post_headers['Content-Type'] = 'application/json'

    def get_candidate_disks(self, disk_count, size_unit='gb', capacity=None):
        """Ask the array for candidate SSD drives; return (disk_ids, total_usable_capacity_bytes)."""
        self.debug("getting candidate disks...")

        drives_req = dict(
            driveCount=disk_count,
            sizeUnit=size_unit,
            driveType='ssd',
        )

        if capacity:
            drives_req['targetUsableCapacity'] = capacity

        (rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid),
                                    data=json.dumps(drives_req), headers=self.post_headers, method='POST',
                                    url_username=self.api_username, url_password=self.api_password,
                                    validate_certs=self.validate_certs)

        # 204 No Content: the array found no drives matching the criteria.
        if rc == 204:
            self.module.fail_json(msg='Cannot find disks to match requested criteria for ssd cache')

        disk_ids = [d['id'] for d in drives_resp]
        # NOTE(review): `bytes` shadows the builtin of the same name in this scope.
        bytes = reduce(lambda s, d: s + int(d['usableCapacity']), drives_resp, 0)

        return (disk_ids, bytes)

    def create_cache(self):
        """Create a new flash cache from candidate SSDs matching the requested count/capacity."""
        (disk_ids, bytes) = self.get_candidate_disks(disk_count=self.disk_count, size_unit=self.size_unit,
                                                     capacity=self.cache_size_min)

        self.debug("creating ssd cache...")

        create_fc_req = dict(
            driveRefs=disk_ids,
            name=self.name
        )

        (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid),
                                  data=json.dumps(create_fc_req), headers=self.post_headers, method='POST',
                                  url_username=self.api_username, url_password=self.api_password,
                                  validate_certs=self.validate_certs)

    def update_cache(self):
        """Push the name and io_type (configType) to the existing flash cache."""
        self.debug('updating flash cache config...')
        update_fc_req = dict(
            name=self.name,
            configType=self.io_type
        )

        (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/configure" % (self.ssid),
                                  data=json.dumps(update_fc_req), headers=self.post_headers, method='POST',
                                  url_username=self.api_username, url_password=self.api_password,
                                  validate_certs=self.validate_certs)

    def delete_cache(self):
        """Delete the flash cache; errors are ignored so state=absent stays idempotent."""
        self.debug('deleting flash cache...')
        (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid), method='DELETE',
                                  url_username=self.api_username, url_password=self.api_password,
                                  validate_certs=self.validate_certs, ignore_errors=True)

    @property
    def needs_more_disks(self):
        # True when the existing cache holds fewer drives than requested.
        # NOTE(review): comparing against disk_count raises TypeError when the
        # optional disk_count parameter was omitted (None) — confirm callers.
        if len(self.cache_detail['driveRefs']) < self.disk_count:
            self.debug("needs resize: current disk count %s < requested requested count %s",
                       len(self.cache_detail['driveRefs']), self.disk_count)
            return True

    @property
    def needs_less_disks(self):
        # True when the existing cache holds more drives than requested.
        # NOTE(review): the debug text reuses the "<" wording from
        # needs_more_disks although the comparison here is ">".
        if len(self.cache_detail['driveRefs']) > self.disk_count:
            self.debug("needs resize: current disk count %s < requested requested count %s",
                       len(self.cache_detail['driveRefs']), self.disk_count)
            return True

    @property
    def current_size_bytes(self):
        # Used capacity of the cache as reported by the array, in bytes.
        return int(self.cache_detail['fcDriveInfo']['fcWithDrives']['usedCapacity'])

    @property
    def requested_size_bytes(self):
        # Requested minimum size converted to bytes; 0 when not specified.
        if self.cache_size_min:
            return self.cache_size_min * self._size_unit_map[self.size_unit]
        else:
            return 0

    @property
    def needs_more_capacity(self):
        # True when the cache's used capacity is below the requested minimum.
        if self.current_size_bytes < self.requested_size_bytes:
            self.debug("needs resize: current capacity %sb is less than requested minimum %sb",
                       self.current_size_bytes, self.requested_size_bytes)
            return True

    @property
    def needs_resize(self):
        # Any drive-count or capacity mismatch triggers a resize.
        return self.needs_more_disks or self.needs_more_capacity or self.needs_less_disks

    def resize_cache(self):
        """Grow or shrink the cache to satisfy the requested drive count and minimum capacity."""
        # increase up to disk count first, then iteratively add disks until we meet requested capacity

        # TODO: perform this calculation in check mode
        current_disk_count = len(self.cache_detail['driveRefs'])
        proposed_new_disks = 0

        proposed_additional_bytes = 0
        proposed_disk_ids = []

        if self.needs_more_disks:
            proposed_disk_count = self.disk_count - current_disk_count

            (disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_disk_count)
            proposed_additional_bytes = bytes
            proposed_disk_ids = disk_ids

            # Keep requesting one more disk until the projected total meets the minimum size.
            while self.current_size_bytes + proposed_additional_bytes < self.requested_size_bytes:
                proposed_new_disks += 1
                (disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_new_disks)
                proposed_disk_ids = disk_ids
                proposed_additional_bytes = bytes

            # NOTE(review): the add endpoint is given key "driveRef" while
            # creation uses "driveRefs" — confirm against the REST API schema.
            add_drives_req = dict(
                driveRef=proposed_disk_ids
            )

            self.debug("adding drives to flash-cache...")
            (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/addDrives" % (self.ssid),
                                      data=json.dumps(add_drives_req), headers=self.post_headers, method='POST',
                                      url_username=self.api_username, url_password=self.api_password,
                                      validate_certs=self.validate_certs)

        elif self.needs_less_disks and self.driveRefs:
            rm_drives = dict(driveRef=self.driveRefs)
            (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/removeDrives" % (self.ssid),
                                      data=json.dumps(rm_drives), headers=self.post_headers, method='POST',
                                      url_username=self.api_username, url_password=self.api_password,
                                      validate_certs=self.validate_certs)

    def apply(self):
        """Detect required changes, apply them (honoring check mode), and exit the module."""
        result = dict(changed=False)
        (rc, cache_resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid),
                                   url_username=self.api_username, url_password=self.api_password,
                                   validate_certs=self.validate_certs, ignore_errors=True)

        # 200 means a cache exists; 404 means it does not; anything else is fatal.
        if rc == 200:
            self.cache_detail = cache_resp
        else:
            self.cache_detail = None

        if rc not in [200, 404]:
            raise Exception(
                "Unexpected error code %s fetching flash cache detail. Response data was %s" % (rc, cache_resp))

        if self.state == 'present':
            if self.cache_detail:
                # TODO: verify parameters against detail for changes
                if self.cache_detail['name'] != self.name:
                    self.debug("CHANGED: name differs")
                    result['changed'] = True
                if self.cache_detail['flashCacheBase']['configType'] != self.io_type:
                    self.debug("CHANGED: io_type differs")
                    result['changed'] = True
                if self.needs_resize:
                    self.debug("CHANGED: resize required")
                    result['changed'] = True
            else:
                self.debug("CHANGED: requested state is 'present' but cache does not exist")
                result['changed'] = True
        else:  # requested state is absent
            if self.cache_detail:
                self.debug("CHANGED: requested state is 'absent' but cache exists")
                result['changed'] = True

        if not result['changed']:
            self.debug("no changes, exiting...")
            self.module.exit_json(**result)

        if self.module.check_mode:
            self.debug("changes pending in check mode, exiting early...")
            self.module.exit_json(**result)

        if self.state == 'present':
            if not self.cache_detail:
                self.create_cache()
            else:
                if self.needs_resize:
                    self.resize_cache()

                # run update here as well, since io_type can't be set on creation
                self.update_cache()

        elif self.state == 'absent':
            self.delete_cache()

        # TODO: include other details about the storage pool (size, type, id, etc)
        self.module.exit_json(changed=result['changed'], **self.resp)
+
+
def main():
    """Module entry point: apply the flash-cache configuration, reporting failures via fail_json."""
    flash_cache = NetAppESeriesFlashCache()
    try:
        flash_cache.apply()
    except Exception as exc:
        flash_cache.debug("Exception in apply(): \n%s", to_native(exc))
        flash_cache.module.fail_json(msg="Failed to create flash cache. Error[%s]" % to_native(exc),
                                     exception=traceback.format_exc())


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_global.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_global.py
new file mode 100644
index 00000000..1284b289
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_global.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_global
+short_description: NetApp E-Series manage global settings configuration
+description:
+ - Allow the user to configure several of the global settings associated with an E-Series storage-system
+version_added: '2.7'
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ name:
+ description:
+ - Set the name of the E-Series storage-system
+ - This label/name doesn't have to be unique.
+ - May be up to 30 characters in length.
+ type: str
+ aliases:
+ - label
+ log_path:
+ description:
+ - A local path to a file to be used for debug logging
+ required: no
+ type: str
+notes:
+ - Check mode is supported.
+ - This module requires Web Services API v1.3 or newer.
+"""
+
+EXAMPLES = """
+ - name: Set the storage-system name
+ netapp_e_global:
+ name: myArrayName
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+name:
+ description:
+ - The current name/label of the storage-system.
+ returned: on success
+ sample: myArrayName
+ type: str
+"""
+import json
+import logging
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
# Default headers for all web services REST calls: send and expect JSON.
HEADERS = {
    "Accept": "application/json",
    "Content-Type": "application/json",
}
+
+
class GlobalSettings(object):
    """Manage global settings (currently the storage-system name/label) of an E-Series array."""

    def __init__(self):
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(dict(
            name=dict(type='str', required=False, aliases=['label']),
            log_path=dict(type='str', required=False),
        ))

        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
        args = self.module.params
        self.name = args['name']

        self.ssid = args['ssid']
        self.url = args['api_url']
        self.creds = dict(url_password=args['api_password'],
                          validate_certs=args['validate_certs'],
                          url_username=args['api_username'], )

        self.check_mode = self.module.check_mode

        log_path = args['log_path']

        # logging setup
        self._logger = logging.getLogger(self.__class__.__name__)

        if log_path:
            logging.basicConfig(
                level=logging.DEBUG, filename=log_path, filemode='w',
                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')

        if not self.url.endswith('/'):
            self.url += '/'

        # The API allows labels up to and including 30 characters (the check is > 30);
        # the previous message claimed "< 30", contradicting the check.
        if self.name and len(self.name) > 30:
            self.module.fail_json(msg="The provided name is invalid, it must be no more than 30 characters in length.")

    def get_name(self):
        """Return the current storage-system name; fail the module when the array is offline or unreachable."""
        try:
            (rc, result) = request(self.url + 'storage-systems/%s' % self.ssid, headers=HEADERS, **self.creds)
            if result['status'] in ['offline', 'neverContacted']:
                self.module.fail_json(msg="This storage-system is offline! Array Id [%s]." % (self.ssid))
            return result['name']
        except Exception as err:
            self.module.fail_json(msg="Connection failure! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

    def update_name(self):
        """Apply the requested name when it differs from the current one.

        Returns True when a change was needed (and, outside check mode, applied).
        """
        name = self.get_name()
        update = False
        # Bug fix: `name` is an optional parameter; previously an omitted name
        # (None) compared unequal to the current label and the module POSTed
        # name=None, attempting to rename the array to null.
        if self.name is not None and self.name != name:
            update = True

        body = dict(name=self.name)

        if update and not self.check_mode:
            try:
                (rc, result) = request(self.url + 'storage-systems/%s/configuration' % self.ssid, method='POST',
                                       data=json.dumps(body), headers=HEADERS, **self.creds)
                self._logger.info("Set name to %s.", result['name'])
            # This is going to catch cases like a connection failure
            except Exception as err:
                self.module.fail_json(
                    msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
                        % (self.ssid, to_native(err)))
        return update

    def update(self):
        """Entry point: apply any changes and exit the module with the resulting name."""
        update = self.update_name()
        name = self.get_name()

        self.module.exit_json(msg="The requested settings have been updated.", changed=update, name=name)

    def __call__(self, *args, **kwargs):
        # Allow the instance itself to be invoked as the module entry point.
        self.update()
+
+
def main():
    """Module entry point: build the settings manager and run it."""
    GlobalSettings()()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_host.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_host.py
new file mode 100644
index 00000000..3d6b4a78
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_host.py
@@ -0,0 +1,544 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, NetApp Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_host
+short_description: NetApp E-Series manage eseries hosts
+description: Create, update, remove hosts on NetApp E-series storage arrays
+version_added: '2.2'
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ name:
+ description:
+ - If the host doesn't yet exist, the label/name to assign at creation time.
+ - If the hosts already exists, this will be used to uniquely identify the host to make any required changes
+ required: True
+ type: str
+ aliases:
+ - label
+ state:
+ description:
+ - Set to absent to remove an existing host
+ - Set to present to modify or create a new host definition
+ choices:
+ - absent
+ - present
+ default: present
+ type: str
+ version_added: 2.7
+ host_type:
+ description:
+ - This is the type of host to be mapped
+ - Required when C(state=present)
+ - Either one of the following names can be specified, Linux DM-MP, VMWare, Windows, Windows Clustered, or a
+ host type index which can be found in M(netapp_eseries.santricity.netapp_e_facts)
+ type: str
+ aliases:
+ - host_type_index
+ ports:
+ description:
+ - A list of host ports you wish to associate with the host.
+ - Host ports are uniquely identified by their WWN or IQN. Their assignments to a particular host are
+ uniquely identified by a label and these must be unique.
+ required: False
+ type: list
+ suboptions:
+ type:
+ description:
+ - The interface type of the port to define.
+ - Acceptable choices depend on the capabilities of the target hardware/software platform.
+ required: true
+ choices:
+ - iscsi
+ - sas
+ - fc
+ - ib
+ - nvmeof
+ - ethernet
+ label:
+ description:
+ - A unique label to assign to this port assignment.
+ required: true
+ port:
+ description:
+ - The WWN or IQN of the hostPort to assign to this port definition.
+ required: true
+ force_port:
+ description:
+ - Allow ports that are already assigned to be re-assigned to your current host
+ required: false
+ type: bool
+ version_added: 2.7
+ group:
+ description:
+ - The unique identifier of the host-group you want the host to be a member of; this is used for clustering.
+ required: False
+ type: str
+ aliases:
+ - cluster
+ log_path:
+ description:
+ - A local path to a file to be used for debug logging
+ required: False
+ type: str
+ version_added: 2.7
+"""
+
+EXAMPLES = """
+ - name: Define or update an existing host named 'Host1'
+ netapp_e_host:
+ ssid: "1"
+ api_url: "10.113.1.101:8443"
+ api_username: admin
+ api_password: myPassword
+ name: "Host1"
+ state: present
+ host_type_index: Linux DM-MP
+ ports:
+ - type: 'iscsi'
+ label: 'PORT_1'
+ port: 'iqn.1996-04.de.suse:01:56f86f9bd1fe'
+ - type: 'fc'
+ label: 'FC_1'
+ port: '10:00:FF:7C:FF:FF:FF:01'
+ - type: 'fc'
+ label: 'FC_2'
+ port: '10:00:FF:7C:FF:FF:FF:00'
+
+ - name: Ensure a host named 'Host2' doesn't exist
+ netapp_e_host:
+ ssid: "1"
+ api_url: "10.113.1.101:8443"
+ api_username: admin
+ api_password: myPassword
+ name: "Host2"
+ state: absent
+"""
+
+RETURN = """
+msg:
+ description:
+ - A user-readable description of the actions performed.
+ returned: on success
+ type: str
+ sample: The host has been created.
+id:
+ description:
+ - the unique identifier of the host on the E-Series storage-system
+ returned: on success when state=present
+ type: str
+ sample: 00000000600A098000AAC0C3003004700AD86A52
+ version_added: "2.6"
+
+ssid:
+ description:
+ - the unique identifier of the E-Series storage-system with the current api
+ returned: on success
+ type: str
+ sample: 1
+ version_added: "2.6"
+
+api_url:
+ description:
+ - the url of the API that this request was processed by
+ returned: on success
+ type: str
+ sample: https://webservices.example.com:8443
+ version_added: "2.6"
+"""
+import json
+import logging
+import re
+from pprint import pformat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
# Default headers for all web services REST calls: send and expect JSON.
HEADERS = {
    "Accept": "application/json",
    "Content-Type": "application/json",
}
+
+
class Host(object):
    """Manage an E-Series host object: create, update, remove, and (re)assign host ports."""

    # Friendly host-type names mapped to the numeric indexes the SANtricity API expects.
    HOST_TYPE_INDEXES = {"linux dm-mp": 28, "vmware": 10, "windows": 1, "windows clustered": 8}

    def __init__(self):
        """Parse module arguments, normalize the requested ports, and set up logging."""
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(dict(
            state=dict(type='str', default='present', choices=['absent', 'present']),
            group=dict(type='str', required=False, aliases=['cluster']),
            ports=dict(type='list', required=False),
            force_port=dict(type='bool', default=False),
            name=dict(type='str', required=True, aliases=['label']),
            host_type=dict(type='str', aliases=['host_type_index']),
            log_path=dict(type='str', required=False),
        ))

        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
        self.check_mode = self.module.check_mode
        args = self.module.params
        self.group = args['group']
        self.ports = args['ports']
        self.force_port = args['force_port']
        self.name = args['name']
        self.state = args['state']
        self.ssid = args['ssid']
        self.url = args['api_url']
        self.user = args['api_username']
        self.pwd = args['api_password']
        self.certs = args['validate_certs']

        self.post_body = dict()
        self.all_hosts = list()
        self.host_obj = dict()
        self.newPorts = list()
        self.portsForUpdate = list()
        self.portsForRemoval = list()

        # Update host type with the corresponding index.
        # BUGFIX: initialize to None up front; previously the attribute was never set when
        # host_type was omitted, so the state=='present' check below raised AttributeError
        # instead of producing a clean module failure.
        self.host_type_index = None
        host_type = args['host_type_index']
        if host_type:
            host_type = host_type.lower()
            if host_type in [key.lower() for key in list(self.HOST_TYPE_INDEXES.keys())]:
                self.host_type_index = self.HOST_TYPE_INDEXES[host_type]
            elif host_type.isdigit():
                self.host_type_index = int(args['host_type_index'])
            else:
                # BUGFIX: reworded the previously garbled error message.
                self.module.fail_json(msg="host_type must be either a host type name or the host type"
                                          " index integer found in the documentation.")

        # logging setup
        self._logger = logging.getLogger(self.__class__.__name__)
        if args['log_path']:
            logging.basicConfig(
                level=logging.DEBUG, filename=args['log_path'], filemode='w',
                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')

        if not self.url.endswith('/'):
            self.url += '/'

        # Ensure when state==present then host_type_index is defined
        if self.state == "present" and self.host_type_index is None:
            self.module.fail_json(msg="Host_type_index is required when state=='present'. Array Id: [%s]" % self.ssid)

        # Normalize port entries to lower case and collapse colon/0x-separated 16-byte WWPNs
        # so they can be compared against the addresses reported by the API.
        if self.ports is not None:
            for port in self.ports:
                port['label'] = port['label'].lower()
                port['type'] = port['type'].lower()
                port['port'] = port['port'].lower()

                # Determine whether address is 16-byte WWPN and, if so, remove separators
                if re.match(r'^(0x)?[0-9a-f]{16}$', port['port'].replace(':', '')):
                    port['port'] = port['port'].replace(':', '').replace('0x', '')

    def valid_host_type(self):
        """Fail the module unless self.host_type_index exists on the target array."""
        host_types = None
        try:
            (rc, host_types) = request(self.url + 'storage-systems/%s/host-types' % self.ssid, url_password=self.pwd,
                                       url_username=self.user, validate_certs=self.certs, headers=HEADERS)
        except Exception as err:
            self.module.fail_json(
                msg="Failed to get host types. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

        # IMPROVED: test membership directly instead of building a filtered list and
        # catching IndexError (the filtered element was never used).
        if not any(host_type['index'] == self.host_type_index for host_type in host_types):
            self.module.fail_json(msg="There is no host type with index %s" % self.host_type_index)
        return True

    def _record_used_port(self, host, host_port, used_host_ports):
        """Resolve *host_port* to its port/initiator reference(s) and record them under *host*.

        Extracted from assigned_host_ports to remove duplicated logic; as a side benefit the
        comprehension variable no longer clobbers the caller's loop variable under Python 2.
        """
        port_ref = [port["hostPortRef"] for port in host["ports"]
                    if port["hostPortName"] == host_port["address"]]
        port_ref.extend([port["initiatorRef"] for port in host["initiators"]
                         if port["nodeName"]["iscsiNodeName"] == host_port["address"]])

        # Create dictionary of hosts containing list of port references
        if host["hostRef"] not in used_host_ports:
            used_host_ports[host["hostRef"]] = port_ref
        else:
            used_host_ports[host["hostRef"]].extend(port_ref)

    def assigned_host_ports(self, apply_unassigning=False):
        """Determine if the hostPorts requested have already been assigned and return list of required used ports.

        When apply_unassigning is True, the conflicting ports are removed from their current
        owners (only permitted when force_port was requested; otherwise the module fails).
        """
        used_host_ports = {}
        for host in self.all_hosts:
            if host['label'] != self.name:
                # A different host owns a port matching either the address or the label.
                for host_port in host['hostSidePorts']:
                    for port in self.ports:
                        if port['port'] == host_port["address"] or port['label'] == host_port['label']:
                            if not self.force_port:
                                self.module.fail_json(msg="There are no host ports available OR there are not enough"
                                                          " unassigned host ports")
                            else:
                                self._record_used_port(host, host_port, used_host_ports)
            else:
                # The target host itself holds a port whose label/address pairing conflicts
                # with the requested definition.
                for host_port in host['hostSidePorts']:
                    for port in self.ports:
                        if ((host_port['label'] == port['label'] and host_port['address'] != port['port']) or
                                (host_port['label'] != port['label'] and host_port['address'] == port['port'])):
                            if not self.force_port:
                                self.module.fail_json(msg="There are no host ports available OR there are not enough"
                                                          " unassigned host ports")
                            else:
                                self._record_used_port(host, host_port, used_host_ports)

        # Unassign assigned ports
        if apply_unassigning:
            for host_ref in used_host_ports.keys():
                try:
                    rc, resp = request(self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, host_ref),
                                       url_username=self.user, url_password=self.pwd, headers=HEADERS,
                                       validate_certs=self.certs, method='POST',
                                       data=json.dumps({"portsToRemove": used_host_ports[host_ref]}))
                except Exception as err:
                    # BUGFIX: self.host_obj may still be empty here (create path), so use
                    # .get() to avoid a KeyError while building the failure message.
                    self.module.fail_json(msg="Failed to unassign host port. Host Id [%s]. Array Id [%s]. Ports [%s]."
                                              " Error [%s]." % (self.host_obj.get('id', 'unknown'), self.ssid,
                                                                used_host_ports[host_ref], to_native(err)))

        return used_host_ports

    def group_id(self):
        """Return the cluster reference for self.group, or the all-zero 'no group' reference."""
        if self.group:
            try:
                (rc, all_groups) = request(self.url + 'storage-systems/%s/host-groups' % self.ssid,
                                           url_password=self.pwd,
                                           url_username=self.user, validate_certs=self.certs, headers=HEADERS)
            except Exception as err:
                self.module.fail_json(
                    msg="Failed to get host groups. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

            try:
                group_obj = list(filter(lambda group: group['name'] == self.group, all_groups))[0]
                return group_obj['id']
            except IndexError:
                self.module.fail_json(msg="No group with the name: %s exists" % self.group)
        else:
            # Return the value equivalent of no group
            return "0000000000000000000000000000000000000000"

    def host_exists(self):
        """Determine if the requested host exists
        As a side effect, set the full list of defined hosts in 'all_hosts', and the target host in 'host_obj'.
        """
        match = False
        all_hosts = list()

        try:
            (rc, all_hosts) = request(self.url + 'storage-systems/%s/hosts' % self.ssid, url_password=self.pwd,
                                      url_username=self.user, validate_certs=self.certs, headers=HEADERS)
        except Exception as err:
            self.module.fail_json(
                msg="Failed to determine host existence. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

        # Augment the host objects
        for host in all_hosts:
            # Lower-case everything so comparisons against the normalized request ports work.
            for port in host['hostSidePorts']:
                port['type'] = port['type'].lower()
                port['address'] = port['address'].lower()
                port['label'] = port['label'].lower()

            # Augment hostSidePorts with their ID (this is an omission in the API)
            ports = dict((port['label'], port['id']) for port in host['ports'])
            ports.update((port['label'], port['id']) for port in host['initiators'])

            for host_side_port in host['hostSidePorts']:
                if host_side_port['label'] in ports:
                    host_side_port['id'] = ports[host_side_port['label']]

            if host['label'] == self.name:
                self.host_obj = host
                match = True

        self.all_hosts = all_hosts
        return match

    def needs_update(self):
        """Determine whether we need to update the Host object
        As a side effect, we will set the ports that we need to update (portsForUpdate), and the ports we need to add
        (newPorts), on self.
        """
        changed = False
        if (self.host_obj["clusterRef"].lower() != self.group_id().lower() or
                self.host_obj["hostTypeIndex"] != self.host_type_index):
            self._logger.info("Either hostType or the clusterRef doesn't match, an update is required.")
            changed = True
        current_host_ports = dict((port["id"], {"type": port["type"], "port": port["address"], "label": port["label"]})
                                  for port in self.host_obj["hostSidePorts"])

        if self.ports:
            for port in self.ports:
                for current_host_port_id in current_host_ports.keys():
                    # Exact match: the port is already defined as requested; drop it from the
                    # removal candidates. (The immediate break makes popping while iterating safe.)
                    if port == current_host_ports[current_host_port_id]:
                        current_host_ports.pop(current_host_port_id)
                        break
                    elif port["port"] == current_host_ports[current_host_port_id]["port"]:
                        if self.port_on_diff_host(port) and not self.force_port:
                            self.module.fail_json(msg="The port you specified [%s] is associated with a different host."
                                                      " Specify force_port as True or try a different port spec" % port)

                        if (port["label"] != current_host_ports[current_host_port_id]["label"] or
                                port["type"] != current_host_ports[current_host_port_id]["type"]):
                            current_host_ports.pop(current_host_port_id)
                            self.portsForUpdate.append({"portRef": current_host_port_id, "port": port["port"],
                                                        "label": port["label"], "hostRef": self.host_obj["hostRef"]})
                        break
                else:
                    # No existing port matched at all: it must be added.
                    self.newPorts.append(port)

            # Whatever remains was not re-requested and must be removed.
            self.portsForRemoval = list(current_host_ports.keys())
            changed = any([self.newPorts, self.portsForUpdate, self.portsForRemoval, changed])

        return changed

    def port_on_diff_host(self, arg_port):
        """ Checks to see if a passed in port arg is present on a different host """
        for host in self.all_hosts:
            # Only check 'other' hosts
            if host['name'] != self.name:
                for port in host['hostSidePorts']:
                    # Check if the port label is found in the port dict list of each host
                    if arg_port['label'] == port['label'] or arg_port['port'] == port['address']:
                        self.other_host = host
                        return True
        return False

    def update_host(self):
        """POST the accumulated changes for an existing host, then exit the module."""
        self._logger.info("Beginning the update for host=%s.", self.name)

        if self.ports:

            # Remove ports that need reassigning from their current host.
            self.assigned_host_ports(apply_unassigning=True)

            self.post_body["portsToUpdate"] = self.portsForUpdate
            self.post_body["ports"] = self.newPorts
            self._logger.info("Requested ports: %s", pformat(self.ports))
        else:
            self._logger.info("No host ports were defined.")

        if self.group:
            self.post_body['groupId'] = self.group_id()

        self.post_body['hostType'] = dict(index=self.host_type_index)

        api = self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id'])
        self._logger.info("POST => url=%s, body=%s.", api, pformat(self.post_body))

        if not self.check_mode:
            try:
                (rc, self.host_obj) = request(api, url_username=self.user, url_password=self.pwd, headers=HEADERS,
                                              validate_certs=self.certs, method='POST', data=json.dumps(self.post_body))
            except Exception as err:
                self.module.fail_json(
                    msg="Failed to update host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

        payload = self.build_success_payload(self.host_obj)
        self.module.exit_json(changed=True, **payload)

    def create_host(self):
        """Create the host definition on the array (no-op under check mode), then exit."""
        self._logger.info("Creating host definition.")

        # Remove ports that need reassigning from their current host.
        self.assigned_host_ports(apply_unassigning=True)

        post_body = dict(
            name=self.name,
            hostType=dict(index=self.host_type_index),
            groupId=self.group_id(),
        )

        if self.ports:
            post_body.update(ports=self.ports)

        api = self.url + "storage-systems/%s/hosts" % self.ssid
        self._logger.info('POST => url=%s, body=%s', api, pformat(post_body))

        if not self.check_mode:
            # Re-check existence to avoid creating a duplicate between apply() and now.
            if not self.host_exists():
                try:
                    (rc, self.host_obj) = request(api, method='POST', url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
                                                  data=json.dumps(post_body), headers=HEADERS)
                except Exception as err:
                    self.module.fail_json(
                        msg="Failed to create host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
            else:
                payload = self.build_success_payload(self.host_obj)
                self.module.exit_json(changed=False, msg="Host already exists. Id [%s]. Host [%s]." % (self.ssid, self.name), **payload)

        payload = self.build_success_payload(self.host_obj)
        self.module.exit_json(changed=True, msg='Host created.', **payload)

    def remove_host(self):
        """Delete the host object referenced by self.host_obj from the array."""
        try:
            (rc, resp) = request(self.url + "storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj['id']),
                                 method='DELETE',
                                 url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
        except Exception as err:
            self.module.fail_json(
                msg="Failed to remove host. Host[%s]. Array Id [%s]. Error [%s]." % (self.host_obj['id'],
                                                                                     self.ssid,
                                                                                     to_native(err)))

    def build_success_payload(self, host=None):
        """Assemble the common exit_json payload (host id when known, plus ssid and api_url)."""
        keys = ['id']
        if host is not None:
            # BUGFIX: only copy keys that are present; under check mode the host is never
            # created, so host_obj is an empty dict and host['id'] raised KeyError.
            result = dict((key, host[key]) for key in keys if key in host)
        else:
            result = dict()
        result['ssid'] = self.ssid
        result['api_url'] = self.url
        return result

    def apply(self):
        """Entry point: reconcile the requested state against the array and exit the module."""
        if self.state == 'present':
            if self.host_exists():
                if self.needs_update() and self.valid_host_type():
                    self.update_host()
                else:
                    payload = self.build_success_payload(self.host_obj)
                    self.module.exit_json(changed=False, msg="Host already present; no changes required.", **payload)
            elif self.valid_host_type():
                self.create_host()
        else:
            payload = self.build_success_payload()
            if self.host_exists():
                self.remove_host()
                self.module.exit_json(changed=True, msg="Host removed.", **payload)
            else:
                self.module.exit_json(changed=False, msg="Host already absent.", **payload)
+
+
def main():
    """Module entry point: build the Host manager and apply the requested state."""
    Host().apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_hostgroup.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_hostgroup.py
new file mode 100644
index 00000000..87676106
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_hostgroup.py
@@ -0,0 +1,307 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {"metadata_version": "1.1",
+ "status": ["deprecated"],
+ "supported_by": "community"}
+
+
+DOCUMENTATION = """
+---
+module: netapp_e_hostgroup
+version_added: "2.2"
+short_description: NetApp E-Series manage array host groups
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+description: Create, update or destroy host groups on a NetApp E-Series storage array.
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ required: true
+ description:
+ - Whether the specified host group should exist or not.
+ type: str
+ choices: ["present", "absent"]
+ name:
+ required: false
+ description:
+ - Name of the host group to manage
+ - This option is mutually exclusive with I(id).
+ type: str
+ new_name:
+ required: false
+ description:
+ - Specify this when you need to update the name of a host group
+ type: str
+ id:
+ required: false
+ description:
+ - Host reference identifier for the host group to manage.
+ - This option is mutually exclusive with I(name).
+ type: str
+ hosts:
+ required: false
+ description:
+ - List of host names/labels to add to the group
+ type: list
+"""
+EXAMPLES = """
+ - name: Configure Hostgroup
+ netapp_e_hostgroup:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: present
+"""
+RETURN = """
+clusterRef:
+ description: The unique identification value for this object. Other objects may use this reference value to refer to the cluster.
+ returned: always except when state is absent
+ type: str
+ sample: "3233343536373839303132333100000000000000"
+confirmLUNMappingCreation:
+ description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping
+ will alter the volume access rights of other clusters, in addition to this one.
+ returned: always
+ type: bool
+ sample: false
+hosts:
+ description: A list of the hosts that are part of the host group after all operations.
+ returned: always except when state is absent
+ type: list
+ sample: ["HostA","HostB"]
+id:
+ description: The id number of the hostgroup
+ returned: always except when state is absent
+ type: str
+ sample: "3233343536373839303132333100000000000000"
+isSAControlled:
+ description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false,
+ indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings.
+ returned: always except when state is absent
+ type: bool
+ sample: false
+label:
+ description: The user-assigned, descriptive label string for the cluster.
+ returned: always
+ type: str
+ sample: "MyHostGroup"
+name:
+ description: same as label
+ returned: always except when state is absent
+ type: str
+ sample: "MyHostGroup"
+protectionInformationCapableAccessMethod:
+ description: This field is true if the host has a PI capable access method.
+ returned: always except when state is absent
+ type: bool
+ sample: true
+"""
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
class NetAppESeriesHostGroup(NetAppESeriesModule):
    """Create, rename, re-home hosts for, or delete a host group on an E-Series array."""

    EXPANSION_TIMEOUT_SEC = 10
    DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11

    def __init__(self):
        version = "02.00.0000.0000"
        ansible_options = dict(
            state=dict(required=True, choices=["present", "absent"], type="str"),
            name=dict(required=False, type="str"),
            new_name=dict(required=False, type="str"),
            id=dict(required=False, type="str"),
            hosts=dict(required=False, type="list"))
        mutually_exclusive = [["name", "id"]]
        super(NetAppESeriesHostGroup, self).__init__(ansible_options=ansible_options,
                                                     web_services_version=version,
                                                     supports_check_mode=True,
                                                     mutually_exclusive=mutually_exclusive)

        args = self.module.params
        self.state = args["state"]
        self.name = args["name"]
        self.new_name = args["new_name"]
        self.id = args["id"]
        self.hosts_list = args["hosts"]

        # Populated by apply() with the matching group dict when one exists.
        self.current_host_group = None

    @property
    def hosts(self):
        """Retrieve a list of host reference identifiers should be associated with the host group.

        Fails the module when a requested host cannot be found on the array.
        NOTE(review): matching uses substring containment (host in existing_host["id"]/["name"]),
        so a short name could match more than one host -- confirm this looseness is intended.
        NOTE(review): each access of this property issues a fresh API request.
        """
        host_list = []
        existing_hosts = []

        if self.hosts_list:
            try:
                rc, existing_hosts = self.request("storage-systems/%s/hosts" % self.ssid)
            except Exception as error:
                self.module.fail_json(msg="Failed to retrieve hosts information. Array id [%s]. Error[%s]."
                                          % (self.ssid, to_native(error)))

            for host in self.hosts_list:
                for existing_host in existing_hosts:
                    if host in existing_host["id"] or host in existing_host["name"]:
                        host_list.append(existing_host["id"])
                        break
                else:
                    self.module.fail_json(msg="Expected host does not exist. Array id [%s]. Host [%s]."
                                              % (self.ssid, host))

        return host_list

    @property
    def host_groups(self):
        """Retrieve a list of existing host groups, each with its member host references."""
        host_groups = []
        hosts = []
        try:
            rc, host_groups = self.request("storage-systems/%s/host-groups" % self.ssid)
            rc, hosts = self.request("storage-systems/%s/hosts" % self.ssid)
        except Exception as error:
            self.module.fail_json(msg="Failed to retrieve host group information. Array id [%s]. Error[%s]."
                                      % (self.ssid, to_native(error)))

        host_groups = [{"id": group["clusterRef"], "name": group["name"]} for group in host_groups]
        for group in host_groups:
            hosts_ids = []
            for host in hosts:
                if group["id"] == host["clusterRef"]:
                    hosts_ids.append(host["hostRef"])
            group.update({"hosts": hosts_ids})

        return host_groups

    @property
    def current_hosts_in_host_group(self):
        """Retrieve the current hosts associated with the current hostgroup."""
        current_hosts = []
        for group in self.host_groups:
            if (self.name and group["name"] == self.name) or (self.id and group["id"] == self.id):
                current_hosts = group["hosts"]

        return current_hosts

    def unassign_hosts(self, host_list=None):
        """Unassign hosts from host group (defaults to every host in the current group)."""
        if host_list is None:
            host_list = self.current_host_group["hosts"]

        for host_id in host_list:
            try:
                # The all-zero group reference means 'no group'.
                rc, resp = self.request("storage-systems/%s/hosts/%s/move" % (self.ssid, host_id),
                                        method="POST", data={"group": "0000000000000000000000000000000000000000"})
            except Exception as error:
                self.module.fail_json(msg="Failed to unassign hosts from host group. Array id [%s]. Host id [%s]."
                                          " Error[%s]." % (self.ssid, host_id, to_native(error)))

    def delete_host_group(self, unassign_hosts=True):
        """Delete host group"""
        if unassign_hosts:
            self.unassign_hosts()

        try:
            rc, resp = self.request("storage-systems/%s/host-groups/%s" % (self.ssid, self.current_host_group["id"]),
                                    method="DELETE")
        except Exception as error:
            self.module.fail_json(msg="Failed to delete host group. Array id [%s]. Error[%s]."
                                      % (self.ssid, to_native(error)))

    def create_host_group(self):
        """Create host group."""
        data = {"name": self.name, "hosts": self.hosts}

        response = None
        try:
            rc, response = self.request("storage-systems/%s/host-groups" % self.ssid, method="POST", data=data)
        except Exception as error:
            self.module.fail_json(msg="Failed to create host group. Array id [%s]. Error[%s]."
                                      % (self.ssid, to_native(error)))

        return response

    def update_host_group(self):
        """Update host group name and/or membership."""
        data = {"name": self.new_name if self.new_name else self.name,
                "hosts": self.hosts}

        # unassign hosts that should not be part of the hostgroup
        desired_host_ids = self.hosts
        for host in self.current_hosts_in_host_group:
            if host not in desired_host_ids:
                self.unassign_hosts([host])

        update_response = None
        try:
            rc, update_response = self.request("storage-systems/%s/host-groups/%s"
                                               % (self.ssid, self.current_host_group["id"]), method="POST", data=data)
        except Exception as error:
            # BUGFIX: this message previously said "create" even though it is the update path.
            self.module.fail_json(msg="Failed to update host group. Array id [%s]. Error[%s]."
                                      % (self.ssid, to_native(error)))

        return update_response

    def apply(self):
        """Apply desired host group state to the storage array."""
        changes_required = False

        # Search for existing host group match
        for group in self.host_groups:
            if (self.id and group["id"] == self.id) or (self.name and group["name"] == self.name):
                self.current_host_group = group

        # Determine whether changes are required
        if self.state == "present":
            if self.current_host_group:
                if (self.new_name and self.new_name != self.name) or self.hosts != self.current_host_group["hosts"]:
                    changes_required = True
            else:
                if not self.name:
                    self.module.fail_json(msg="The option name must be supplied when creating a new host group."
                                              " Array id [%s]." % self.ssid)
                changes_required = True

        elif self.current_host_group:
            changes_required = True

        # Apply any necessary changes.
        # BUGFIX: the default message used to be assigned *inside* the change branch, so a
        # run that required no changes exited with an empty msg.
        msg = "No changes required."
        if changes_required and not self.module.check_mode:
            if self.state == "present":
                if self.current_host_group:
                    if ((self.new_name and self.new_name != self.name) or
                            (self.hosts != self.current_host_group["hosts"])):
                        msg = self.update_host_group()
                else:
                    msg = self.create_host_group()

            elif self.current_host_group:
                self.delete_host_group()
                msg = "Host group deleted. Array Id [%s]. Host Name [%s]. Host Id [%s]."\
                      % (self.ssid, self.current_host_group["name"], self.current_host_group["id"])

        self.module.exit_json(msg=msg, changed=changes_required)
+
+
def main():
    """Module entry point: build the host-group manager and apply the requested state."""
    NetAppESeriesHostGroup().apply()


if __name__ == "__main__":
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_interface.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_interface.py
new file mode 100644
index 00000000..5e290f74
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_interface.py
@@ -0,0 +1,407 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_iscsi_interface
+short_description: NetApp E-Series manage iSCSI interface configuration
+description:
+ - Configure settings of an E-Series iSCSI interface
+version_added: '2.7'
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ controller:
+ description:
+ - The controller that owns the port you want to configure.
+ - Controller names are presented alphabetically, with the first controller as A,
+ the second as B, and so on.
+ - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard
+ limitation and could change in the future.
+ required: yes
+ type: str
+ choices:
+ - A
+ - B
+ name:
+ description:
+ - The channel of the port to modify the configuration of.
+ - The list of choices is not necessarily comprehensive. It depends on the number of ports
+ that are available in the system.
+ - The numerical value represents the number of the channel (typically from left to right on the HIC),
+ beginning with a value of 1.
+ required: yes
+ type: int
+ aliases:
+ - channel
+ state:
+ description:
+ - When enabled, the provided configuration will be utilized.
+ - When disabled, the IPv4 configuration will be cleared and IPv4 connectivity disabled.
+ choices:
+ - enabled
+ - disabled
+ default: enabled
+ type: str
+ address:
+ description:
+ - The IPv4 address to assign to the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ subnet_mask:
+ description:
+ - The subnet mask to utilize for the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ gateway:
+ description:
+ - The IPv4 gateway address to utilize for the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ config_method:
+ description:
+ - The configuration method type to use for this interface.
+ - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway).
+ choices:
+ - dhcp
+ - static
+ default: dhcp
+ type: str
+ mtu:
+ description:
+ - The maximum transmission units (MTU), in bytes.
+ - This allows you to configure a larger value for the MTU, in order to enable jumbo frames
+ (any value > 1500).
+ - Generally, it is necessary to have your host, switches, and other components not only support jumbo
+ frames, but also have it configured properly. Therefore, unless you know what you're doing, it's best to
+ leave this at the default.
+ default: 1500
+ type: int
+ aliases:
+ - max_frame_size
+ log_path:
+ description:
+ - A local path to a file to be used for debug logging
+ type: str
+ required: no
+notes:
+ - Check mode is supported.
+ - The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address
+ via dhcp, etc), can take seconds or minutes longer to take effect.
+ - This module will not be useful/usable on an E-Series system without any iSCSI interfaces.
+ - This module requires a Web Services API version of >= 1.3.
+"""
+
+EXAMPLES = """
+ - name: Configure the first port on the A controller with a static IPv4 address
+ netapp_e_iscsi_interface:
+ name: "1"
+ controller: "A"
+ config_method: static
+ address: "192.168.1.100"
+ subnet_mask: "255.255.255.0"
+ gateway: "192.168.1.1"
+ ssid: "1"
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+
+ - name: Disable ipv4 connectivity for the second port on the B controller
+ netapp_e_iscsi_interface:
+ name: "2"
+ controller: "B"
+ state: disabled
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+
+ - name: Enable jumbo frames for the first 4 ports on controller A
+ netapp_e_iscsi_interface:
+ name: "{{ item | int }}"
+ controller: "A"
+ state: enabled
+ mtu: 9000
+ config_method: dhcp
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ loop:
+ - 1
+ - 2
+ - 3
+ - 4
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The interface settings have been updated.
+enabled:
+ description:
+ - Indicates whether IPv4 connectivity has been enabled or disabled.
+ - This does not necessarily indicate connectivity. If dhcp was enabled without a dhcp server, for instance,
+ it is unlikely that the configuration will actually be valid.
+ returned: on success
+ sample: True
+ type: bool
+"""
+import json
+import logging
+from pprint import pformat
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
# Common HTTP headers for every SANtricity Web Services REST call:
# all requests and responses are JSON.
HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}
+
+
+class IscsiInterface(object):
+ def __init__(self):
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(dict(
+ controller=dict(type='str', required=True, choices=['A', 'B']),
+ name=dict(type='int', aliases=['channel']),
+ state=dict(type='str', required=False, default='enabled', choices=['enabled', 'disabled']),
+ address=dict(type='str', required=False),
+ subnet_mask=dict(type='str', required=False),
+ gateway=dict(type='str', required=False),
+ config_method=dict(type='str', required=False, default='dhcp', choices=['dhcp', 'static']),
+ mtu=dict(type='int', default=1500, required=False, aliases=['max_frame_size']),
+ log_path=dict(type='str', required=False),
+ ))
+
+ required_if = [
+ ["config_method", "static", ["address", "subnet_mask"]],
+ ]
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if, )
+ args = self.module.params
+ self.controller = args['controller']
+ self.name = args['name']
+ self.mtu = args['mtu']
+ self.state = args['state']
+ self.address = args['address']
+ self.subnet_mask = args['subnet_mask']
+ self.gateway = args['gateway']
+ self.config_method = args['config_method']
+
+ self.ssid = args['ssid']
+ self.url = args['api_url']
+ self.creds = dict(url_password=args['api_password'],
+ validate_certs=args['validate_certs'],
+ url_username=args['api_username'], )
+
+ self.check_mode = self.module.check_mode
+ self.post_body = dict()
+ self.controllers = list()
+
+ log_path = args['log_path']
+
+ # logging setup
+ self._logger = logging.getLogger(self.__class__.__name__)
+
+ if log_path:
+ logging.basicConfig(
+ level=logging.DEBUG, filename=log_path, filemode='w',
+ format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ if self.mtu < 1500 or self.mtu > 9000:
+ self.module.fail_json(msg="The provided mtu is invalid, it must be > 1500 and < 9000 bytes.")
+
+ if self.config_method == 'dhcp' and any([self.address, self.subnet_mask, self.gateway]):
+ self.module.fail_json(msg='A config_method of dhcp is mutually exclusive with the address,'
+ ' subnet_mask, and gateway options.')
+
+ # A relatively primitive regex to validate that the input is formatted like a valid ip address
+ address_regex = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
+
+ if self.address and not address_regex.match(self.address):
+ self.module.fail_json(msg="An invalid ip address was provided for address.")
+
+ if self.subnet_mask and not address_regex.match(self.subnet_mask):
+ self.module.fail_json(msg="An invalid ip address was provided for subnet_mask.")
+
+ if self.gateway and not address_regex.match(self.gateway):
+ self.module.fail_json(msg="An invalid ip address was provided for gateway.")
+
@property
def interfaces(self):
    """Return the iSCSI host interfaces defined on the array.

    Queries the controller graph for all host interfaces and keeps only the
    iSCSI entries, unwrapping each from its 'iscsi' sub-document. Calls
    fail_json when the request to the array fails.
    """
    endpoint = (self.url + 'storage-systems/%s/graph/xpath-filter?query=/controller/hostInterfaces'
                % self.ssid)
    host_ifaces = list()
    try:
        rc, host_ifaces = request(endpoint, headers=HEADERS, **self.creds)
    except Exception as err:
        self.module.fail_json(
            msg="Failed to retrieve defined host interfaces. Array Id [%s]. Error [%s]."
            % (self.ssid, to_native(err)))

    # Only the iSCSI-type interfaces are relevant to this module.
    return [entry['iscsi'] for entry in host_ifaces if entry['interfaceType'] == 'iscsi']
+
def get_controllers(self):
    """Retrieve a mapping of controller labels to their references.

    Sample return value:
    {
        'A': '070000000000000000000001',
        'B': '070000000000000000000002',
    }
    :return: dict mapping label ('A', 'B', ...) to controller reference
    """
    controller_refs = list()
    try:
        rc, controller_refs = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/controller/id'
                                      % self.ssid, headers=HEADERS, **self.creds)
    except Exception as err:
        self.module.fail_json(
            msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
            % (self.ssid, to_native(err)))

    # Labels are assigned alphabetically against the sorted reference list,
    # so the lowest reference becomes controller 'A'.
    controller_refs.sort()
    return dict((chr(ord('A') + index), ref) for index, ref in enumerate(controller_refs))
+
def fetch_target_interface(self):
    """Return the iSCSI interface matching the requested channel and controller.

    Fails the module with the list of valid channels for the requested
    controller when no interface matches.
    """
    candidates = self.interfaces
    controller_ref = self.controllers[self.controller]

    for candidate in candidates:
        if candidate['channel'] == self.name and candidate['controllerId'] == controller_ref:
            return candidate

    # No match: report which channels exist on the requested controller.
    valid_channels = sorted(set(str(candidate['channel']) for candidate in candidates
                                if candidate['controllerId'] == controller_ref))
    self.module.fail_json(msg="The requested channel of %s is not valid. Valid channels include: %s."
                              % (self.name, ", ".join(valid_channels)))
+
def make_update_body(self, target_iface):
    """Compute the setIscsiInterfaceProperties request body for the target interface.

    Compares the requested module options against the interface's current
    configuration and collects only the differing settings.

    :param target_iface: current interface document from the array graph
    :return: (update_required, body) — whether any change is needed, and the
             POST body keyed by the interface id
    """
    update_required = False
    body = {'iscsiInterface': target_iface['id']}

    self._logger.info("Requested state=%s.", self.state)
    self._logger.info("config_method: current=%s, requested=%s",
                      target_iface['ipv4Data']['ipv4AddressConfigMethod'], self.config_method)

    if self.state == 'enabled':
        settings = {}
        if not target_iface['ipv4Enabled']:
            settings['ipv4Enabled'] = [True]
            update_required = True

        current_mtu = target_iface['interfaceData']['ethernetData']['maximumFramePayloadSize']
        if current_mtu != self.mtu:
            settings['maximumFramePayloadSize'] = [self.mtu]
            update_required = True

        if self.config_method == 'static':
            current_ipv4 = target_iface['ipv4Data']['ipv4AddressData']

            # Address and subnet are always compared; gateway only when supplied.
            for current, requested, key in (
                    (current_ipv4['ipv4Address'], self.address, 'ipv4Address'),
                    (current_ipv4['ipv4SubnetMask'], self.subnet_mask, 'ipv4SubnetMask')):
                if current != requested:
                    settings[key] = [requested]
                    update_required = True
            if self.gateway is not None and current_ipv4['ipv4GatewayAddress'] != self.gateway:
                settings['ipv4GatewayAddress'] = [self.gateway]
                update_required = True

            if target_iface['ipv4Data']['ipv4AddressConfigMethod'] != 'configStatic':
                settings['ipv4AddressConfigMethod'] = ['configStatic']
                update_required = True

        elif target_iface['ipv4Data']['ipv4AddressConfigMethod'] != 'configDhcp':
            # Switching to DHCP also (re)enables the interface.
            settings['ipv4Enabled'] = [True]
            settings['ipv4AddressConfigMethod'] = ['configDhcp']
            update_required = True

        # 'settings' is always present in the enabled branch, even when empty.
        body['settings'] = settings

    elif target_iface['ipv4Enabled']:
        # Requested state is disabled but the interface is currently enabled.
        body['settings'] = {'ipv4Enabled': [False]}
        update_required = True

    self._logger.info("Update required ?=%s", update_required)
    self._logger.info("Update body: %s", pformat(body))

    return update_required, body
+
def update(self):
    """Apply the requested iSCSI interface configuration and exit the module.

    Validates the controller label, computes the delta against the current
    interface configuration, POSTs the change (unless in check mode), then
    re-reads the interface and exits with the resulting enabled state.
    """
    self.controllers = self.get_controllers()
    if self.controller not in self.controllers:
        self.module.fail_json(msg="The provided controller name is invalid. Valid controllers: %s."
                              % ", ".join(self.controllers.keys()))

    iface_before = self.fetch_target_interface()
    update_required, body = self.make_update_body(iface_before)
    if update_required and not self.check_mode:
        try:
            url = (self.url +
                   'storage-systems/%s/symbol/setIscsiInterfaceProperties' % self.ssid)
            # ignore_errors=True so we can inspect rc/result ourselves below.
            (rc, result) = request(url, method='POST', data=json.dumps(body), headers=HEADERS, timeout=300,
                                   ignore_errors=True, **self.creds)
            # We could potentially retry this a few times, but it's probably a rare enough case (unless a playbook
            # is cancelled mid-flight), that it isn't worth the complexity.
            if rc == 422 and result['retcode'] in ['busy', '3']:
                self.module.fail_json(
                    msg="The interface is currently busy (probably processing a previously requested modification"
                        " request). This operation cannot currently be completed. Array Id [%s]. Error [%s]."
                        % (self.ssid, result))
            # Handle authentication issues, etc.
            elif rc != 200:
                self.module.fail_json(
                    msg="Failed to modify the interface! Array Id [%s]. Error [%s]."
                        % (self.ssid, to_native(result)))
            self._logger.debug("Update request completed successfully.")
        # This is going to catch cases like a connection failure
        except Exception as err:
            self.module.fail_json(
                msg="Connection failure: we failed to modify the interface! Array Id [%s]. Error [%s]."
                    % (self.ssid, to_native(err)))

    # Re-read so the reported 'enabled' flag reflects the post-update state.
    iface_after = self.fetch_target_interface()

    self.module.exit_json(msg="The interface settings have been updated.", changed=update_required,
                          enabled=iface_after['ipv4Enabled'])
+
def __call__(self, *args, **kwargs):
    """Make the handler callable; any arguments are accepted and ignored."""
    self.update()
+
+
def main():
    """Module entry point: build the iSCSI interface handler and run it."""
    IscsiInterface()()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_target.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_target.py
new file mode 100644
index 00000000..040d3e78
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_target.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_iscsi_target
+short_description: NetApp E-Series manage iSCSI target configuration
+description:
+ - Configure the settings of an E-Series iSCSI target
+version_added: '2.7'
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ name:
+ description:
+ - The name/alias to assign to the iSCSI target.
+ - This alias is often used by the initiator software in order to make an iSCSI target easier to identify.
+ type: str
+ aliases:
+ - alias
+ ping:
+ description:
+ - Enable ICMP ping responses from the configured iSCSI ports.
+ type: bool
+ default: yes
+ chap_secret:
+ description:
+ - Enable Challenge-Handshake Authentication Protocol (CHAP), utilizing this value as the password.
+ - When this value is specified, we will always trigger an update (changed=True). We have no way of verifying
+ whether or not the password has changed.
+ - The chap secret may only use ascii characters with values between 32 and 126 decimal.
+ - The chap secret must be no less than 12 characters, but no greater than 57 characters in length.
+ - The chap secret is cleared when not specified or an empty string.
+ type: str
+ aliases:
+ - chap
+ - password
+ unnamed_discovery:
+ description:
+ - When an initiator initiates a discovery session to an initiator port, it is considered an unnamed
+ discovery session if the iSCSI target iqn is not specified in the request.
+ - This option may be disabled to increase security if desired.
+ type: bool
+ default: yes
+ log_path:
+ description:
+ - A local path (on the Ansible controller), to a file to be used for debug logging.
+ type: str
+ required: no
+notes:
+ - Check mode is supported.
+ - Some of the settings are dependent on the settings applied to the iSCSI interfaces. These can be configured using
+ M(netapp_eseries.santricity.netapp_e_iscsi_interface).
+ - This module requires a Web Services API version of >= 1.3.
+"""
+
+EXAMPLES = """
+ - name: Enable ping responses and unnamed discovery sessions for all iSCSI ports
+ netapp_e_iscsi_target:
+ api_url: "https://localhost:8443/devmgr/v2"
+ api_username: admin
+ api_password: myPassword
+ ssid: "1"
+ validate_certs: no
+ name: myTarget
+ ping: yes
+ unnamed_discovery: yes
+
+ - name: Set the target alias and the CHAP secret
+ netapp_e_iscsi_target:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ name: myTarget
+ chap: password1234
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The iSCSI target settings have been updated.
+alias:
+ description:
+ - The alias assigned to the iSCSI target.
+ returned: on success
+ sample: myArray
+ type: str
+iqn:
+ description:
+ - The iqn (iSCSI Qualified Name), assigned to the iSCSI target.
+ returned: on success
+ sample: iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45
+ type: str
+"""
+import json
+import logging
+from pprint import pformat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
class IscsiTarget(object):
    """Manage the array-wide iSCSI target settings (alias, CHAP, ping, discovery).

    Reads module parameters, validates the CHAP secret constraints, and applies
    any differences to the storage system via the Web Services REST API.
    """

    def __init__(self):
        """Parse module arguments, set up logging, and validate the CHAP secret."""
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(dict(
            name=dict(type='str', required=False, aliases=['alias']),
            ping=dict(type='bool', required=False, default=True),
            chap_secret=dict(type='str', required=False, aliases=['chap', 'password'], no_log=True),
            unnamed_discovery=dict(type='bool', required=False, default=True),
            log_path=dict(type='str', required=False),
        ))

        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
        args = self.module.params

        self.name = args['name']
        self.ping = args['ping']
        self.chap_secret = args['chap_secret']
        self.unnamed_discovery = args['unnamed_discovery']

        self.ssid = args['ssid']
        self.url = args['api_url']
        self.creds = dict(url_password=args['api_password'],
                          validate_certs=args['validate_certs'],
                          url_username=args['api_username'], )

        self.check_mode = self.module.check_mode
        self.post_body = dict()
        self.controllers = list()

        log_path = args['log_path']

        # logging setup
        self._logger = logging.getLogger(self.__class__.__name__)

        if log_path:
            logging.basicConfig(
                level=logging.DEBUG, filename=log_path, filemode='w',
                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')

        # The REST paths below are appended directly, so ensure a trailing slash.
        if not self.url.endswith('/'):
            self.url += '/'

        # CHAP secret constraints documented by the module: 12-57 chars, printable ASCII.
        if self.chap_secret:
            if len(self.chap_secret) < 12 or len(self.chap_secret) > 57:
                self.module.fail_json(msg="The provided CHAP secret is not valid, it must be between 12 and 57"
                                          " characters in length.")

            for c in self.chap_secret:
                ordinal = ord(c)
                if ordinal < 32 or ordinal > 126:
                    self.module.fail_json(msg="The provided CHAP secret is not valid, it may only utilize ascii"
                                              " characters with decimal values between 32 and 126.")

    @property
    def target(self):
        """Provide information on the iSCSI Target configuration

        Sample:
        {
            'alias': 'myCustomName',
            'ping': True,
            'unnamed_discovery': True,
            'chap': False,
            'iqn': 'iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45',
        }
        """
        target = dict()
        try:
            (rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/storagePoolBundle/target'
                                 % self.ssid, headers=HEADERS, **self.creds)
            # This likely isn't an iSCSI-enabled system
            if not data:
                self.module.fail_json(
                    msg="This storage-system doesn't appear to have iSCSI interfaces. Array Id [%s]." % (self.ssid))

            data = data[0]

            # CHAP is considered enabled if any configured auth method is 'chap'.
            chap = any(
                [auth for auth in data['configuredAuthMethods']['authMethodData'] if auth['authMethod'] == 'chap'])

            target.update(dict(alias=data['alias']['iscsiAlias'],
                               iqn=data['nodeName']['iscsiNodeName'],
                               chap=chap))

            # Ping/discovery flags live in a separate part of the graph.
            (rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/sa/iscsiEntityData'
                                 % self.ssid, headers=HEADERS, **self.creds)

            data = data[0]
            target.update(dict(ping=data['icmpPingResponseEnabled'],
                               unnamed_discovery=data['unnamedDiscoverySessionsEnabled']))

        except Exception as err:
            self.module.fail_json(
                msg="Failed to retrieve the iSCSI target information. Array Id [%s]. Error [%s]."
                    % (self.ssid, to_native(err)))

        return target

    def apply_iscsi_settings(self):
        """Update the iSCSI target alias and CHAP settings.

        :return: True when a POST was (or would be) issued, False otherwise
        """
        update = False
        target = self.target

        body = dict()

        if self.name is not None and self.name != target['alias']:
            update = True
            body['alias'] = self.name

        # If the CHAP secret was provided, we trigger an update.
        # We can't read the existing secret back, so this is always changed=True.
        if self.chap_secret:
            update = True
            body.update(dict(enableChapAuthentication=True,
                             chapSecret=self.chap_secret))
        # If no secret was provided, then we disable chap
        elif target['chap']:
            update = True
            body.update(dict(enableChapAuthentication=False))

        if update and not self.check_mode:
            try:
                request(self.url + 'storage-systems/%s/iscsi/target-settings' % self.ssid, method='POST',
                        data=json.dumps(body), headers=HEADERS, **self.creds)
            except Exception as err:
                self.module.fail_json(
                    msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]."
                        % (self.ssid, to_native(err)))

        return update

    def apply_target_changes(self):
        """Update the ping-response and unnamed-discovery flags.

        :return: True when a POST was (or would be) issued, False otherwise
        """
        update = False
        target = self.target

        body = dict()

        if self.ping != target['ping']:
            update = True
            body['icmpPingResponseEnabled'] = self.ping

        if self.unnamed_discovery != target['unnamed_discovery']:
            update = True
            body['unnamedDiscoverySessionsEnabled'] = self.unnamed_discovery

        self._logger.info(pformat(body))
        if update and not self.check_mode:
            try:
                request(self.url + 'storage-systems/%s/iscsi/entity' % self.ssid, method='POST',
                        data=json.dumps(body), timeout=60, headers=HEADERS, **self.creds)
            except Exception as err:
                self.module.fail_json(
                    msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]."
                        % (self.ssid, to_native(err)))
        return update

    def update(self):
        """Apply all target changes and exit with the resulting iqn/alias."""
        update = self.apply_iscsi_settings()
        update = self.apply_target_changes() or update

        target = self.target
        data = dict((key, target[key]) for key in target if key in ['iqn', 'alias'])

        # NOTE(review): message says "interface settings" — likely copy-pasted
        # from the iscsi_interface module; "target settings" was probably intended.
        self.module.exit_json(msg="The interface settings have been updated.", changed=update, **data)

    def __call__(self, *args, **kwargs):
        """Make the handler callable; any arguments are accepted and ignored."""
        self.update()
+
+
def main():
    """Module entry point: build the iSCSI target handler and run it."""
    IscsiTarget()()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_ldap.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_ldap.py
new file mode 100644
index 00000000..e3bb61e6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_ldap.py
@@ -0,0 +1,401 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_ldap
+short_description: NetApp E-Series manage LDAP integration to use for authentication
+description:
+ - Configure an E-Series system to allow authentication via an LDAP server
+version_added: '2.7'
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ description:
+ - Enable/disable LDAP support on the system. Disabling will clear out any existing defined domains.
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+ identifier:
+ description:
+ - This is a unique identifier for the configuration (for cases where there are multiple domains configured).
+ - If this is not specified, but I(state=present), we will utilize a default value of 'default'.
+ type: str
+ username:
+ description:
+ - This is the user account that will be used for querying the LDAP server.
+ - "Example: CN=MyBindAcct,OU=ServiceAccounts,DC=example,DC=com"
+ required: yes
+ type: str
+ aliases:
+ - bind_username
+ password:
+ description:
+ - This is the password for the bind user account.
+ required: yes
+ type: str
+ aliases:
+ - bind_password
+ attributes:
+ description:
+ - The user attributes that should be considered for the group to role mapping.
+ - Typically this is used with something like 'memberOf', and a user's access is tested against group
+ membership or lack thereof.
+ default: memberOf
+ type: list
+ server:
+ description:
+ - This is the LDAP server url.
+ - The connection string should be specified as using the ldap or ldaps protocol along with the port
+ information.
+ aliases:
+ - server_url
+ required: yes
+ type: str
+ name:
+ description:
+ - The domain name[s] that will be utilized when authenticating to identify which domain to utilize.
+ - Defaults to the DNS name of the I(server).
+ - The only requirement is that the name[s] be resolvable.
+ - "Example: user@example.com"
+ required: no
+ type: list
+ search_base:
+ description:
+ - The search base is used to find group memberships of the user.
+ - "Example: ou=users,dc=example,dc=com"
+ required: yes
+ type: str
+ role_mappings:
+ description:
+ - This is where you specify which groups should have access to what permissions for the
+ storage-system.
+ - For example, all users in group A will be assigned all 4 available roles, which will allow access
+ to all the management functionality of the system (super-user). Those in group B only have the
+ storage.monitor role, which will allow only read-only access.
+ - This is specified as a mapping of regular expressions to a list of roles. See the examples.
+ - The roles that will be assigned to the group/groups matching the provided regex.
+ - storage.admin allows users full read/write access to storage objects and operations.
+ - storage.monitor allows users read-only access to storage objects and operations.
+ - support.admin allows users access to hardware, diagnostic information, the Major Event
+ Log, and other critical support-related functionality, but not the storage configuration.
+ - security.admin allows users access to authentication/authorization configuration, as well
+ as the audit log configuration, and certification management.
+ type: dict
+ required: yes
+ user_attribute:
+ description:
+ - This is the attribute we will use to match the provided username when a user attempts to
+ authenticate.
+ type: str
+ default: sAMAccountName
+ log_path:
+ description:
+ - A local path to a file to be used for debug logging
+ required: no
+ type: str
+notes:
+ - Check mode is supported.
+ - This module allows you to define one or more LDAP domains identified uniquely by I(identifier) to use for
+ authentication. Authorization is determined by I(role_mappings), in that different groups of users may be given
+ different (or no), access to certain aspects of the system and API.
+ - The local user accounts will still be available if the LDAP server becomes unavailable/inaccessible.
+ - Generally, you'll need to get the details of your organization's LDAP server before you'll be able to configure
+ the system for using LDAP authentication; every implementation is likely to be very different.
+ - This API is currently only supported with the Embedded Web Services API v2.0 and higher, or the Web Services Proxy
+ v3.0 and higher.
+'''
+
+EXAMPLES = '''
+ - name: Disable LDAP authentication
+ netapp_e_ldap:
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+ ssid: "1"
+ state: absent
+
+ - name: Remove the 'default' LDAP domain configuration
+ netapp_e_ldap:
+ state: absent
+ identifier: default
+
+ - name: Define a new LDAP domain, utilizing defaults where possible
+ netapp_e_ldap:
+ state: present
+ bind_username: "CN=MyBindAccount,OU=ServiceAccounts,DC=example,DC=com"
+ bind_password: "mySecretPass"
+ server: "ldap://example.com:389"
+ search_base: 'OU=Users,DC=example,DC=com'
+ role_mappings:
+ ".*dist-dev-storage.*":
+ - storage.admin
+ - security.admin
+ - support.admin
+ - storage.monitor
+'''
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The ldap settings have been updated.
+"""
+
+import json
+import logging
+
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+
class Ldap(object):
    """Manage LDAP domain configurations on an E-Series system.

    Supports defining/updating a single domain (state=present), clearing a
    single domain by identifier, or clearing all domains (state=absent).
    Works against both the embedded Web Services and the Web Services Proxy.
    """
    NO_CHANGE_MSG = "No changes were necessary."

    def __init__(self):
        """Parse module arguments, set up credentials and logging."""
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(dict(
            state=dict(type='str', required=False, default='present',
                       choices=['present', 'absent']),
            identifier=dict(type='str', required=False, ),
            username=dict(type='str', required=False, aliases=['bind_username']),
            password=dict(type='str', required=False, aliases=['bind_password'], no_log=True),
            name=dict(type='list', required=False, ),
            server=dict(type='str', required=False, aliases=['server_url']),
            search_base=dict(type='str', required=False, ),
            role_mappings=dict(type='dict', required=False, ),
            user_attribute=dict(type='str', required=False, default='sAMAccountName'),
            attributes=dict(type='list', default=['memberOf'], required=False, ),
            log_path=dict(type='str', required=False),
        ))

        required_if = [
            ["state", "present", ["username", "password", "server", "search_base", "role_mappings", ]]
        ]

        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
        args = self.module.params
        self.ldap = args['state'] == 'present'
        self.identifier = args['identifier']
        self.username = args['username']
        self.password = args['password']
        self.names = args['name']
        self.server = args['server']
        self.search_base = args['search_base']
        self.role_mappings = args['role_mappings']
        self.user_attribute = args['user_attribute']
        self.attributes = args['attributes']

        self.ssid = args['ssid']
        self.url = args['api_url']
        self.creds = dict(url_password=args['api_password'],
                          validate_certs=args['validate_certs'],
                          url_username=args['api_username'],
                          timeout=60)

        self.check_mode = self.module.check_mode

        log_path = args['log_path']

        # logging setup
        self._logger = logging.getLogger(self.__class__.__name__)

        if log_path:
            logging.basicConfig(
                level=logging.DEBUG, filename=log_path, filemode='w',
                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')

        # REST paths are appended directly, so ensure a trailing slash.
        if not self.url.endswith('/'):
            self.url += '/'

        # Lazily resolved: embedded-vs-proxy detection and the LDAP base path.
        self.embedded = None
        self.base_path = None

    def make_configuration(self):
        """Build the LDAP domain document from the module parameters.

        Defaults the identifier to 'default' and the domain names to the
        server's hostname (port stripped) when not supplied.
        :return: dict suitable for POSTing to the ldap endpoint
        """
        if not self.identifier:
            self.identifier = 'default'

        if not self.names:
            parts = urlparse.urlparse(self.server)
            netloc = parts.netloc
            if ':' in netloc:
                netloc = netloc.split(':')[0]
            self.names = [netloc]

        # Flatten {regex: [role, ...]} into one entry per (regex, role) pair.
        roles = list()
        for regex in self.role_mappings:
            for role in self.role_mappings[regex]:
                roles.append(dict(groupRegex=regex,
                                  ignoreCase=True,
                                  name=role))

        domain = dict(id=self.identifier,
                      ldapUrl=self.server,
                      bindLookupUser=dict(user=self.username, password=self.password),
                      roleMapCollection=roles,
                      groupAttributes=self.attributes,
                      names=self.names,
                      searchBase=self.search_base,
                      userAttribute=self.user_attribute,
                      )

        return domain

    def is_embedded(self):
        """Determine whether or not we're using the embedded or proxy implementation of Web Services"""
        if self.embedded is None:
            url = self.url
            try:
                # Probe the version-independent 'about' endpoint.
                parts = urlparse.urlparse(url)
                parts = parts._replace(path='/devmgr/utils/')
                url = urlparse.urlunparse(parts)

                (rc, result) = request(url + 'about', **self.creds)
                self.embedded = not result['runningAsProxy']
            except Exception as err:
                self._logger.exception("Failed to retrieve the About information.")
                self.module.fail_json(msg="Failed to determine the Web Services implementation type!"
                                          " Array Id [%s]. Error [%s]."
                                          % (self.ssid, to_native(err)))

        return self.embedded

    def get_full_configuration(self):
        """Return the complete LDAP configuration (all domains) from the API."""
        try:
            (rc, result) = request(self.url + self.base_path, **self.creds)
            return result
        except Exception as err:
            self._logger.exception("Failed to retrieve the LDAP configuration.")
            self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
                                      % (self.ssid, to_native(err)))

    def get_configuration(self, identifier):
        """Return one LDAP domain configuration, or None when it does not exist."""
        try:
            (rc, result) = request(self.url + self.base_path + '%s' % (identifier), ignore_errors=True, **self.creds)
            if rc == 200:
                return result
            elif rc == 404:
                # Domain not defined yet.
                return None
            else:
                self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
                                          % (self.ssid, result))
        except Exception as err:
            self._logger.exception("Failed to retrieve the LDAP configuration.")
            self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
                                      % (self.ssid, to_native(err)))

    def update_configuration(self):
        """Create or update the LDAP domain identified by self.identifier.

        :return: (message, changed)
        """
        # Define a new domain based on the user input
        domain = self.make_configuration()

        # This is the current list of configurations
        current = self.get_configuration(self.identifier)

        # NOTE(review): this compares the locally-built document against the
        # API's returned document; if the API adds fields the two will never
        # be equal and the module will always report changed — verify.
        update = current != domain
        msg = "No changes were necessary for [%s]." % self.identifier
        self._logger.info("Is updated: %s", update)
        if update and not self.check_mode:
            msg = "The configuration changes were made for [%s]." % self.identifier
            try:
                if current is None:
                    api = self.base_path + 'addDomain'
                else:
                    api = self.base_path + '%s' % (domain['id'])

                (rc, result) = request(self.url + api, method='POST', data=json.dumps(domain), **self.creds)
            except Exception as err:
                self._logger.exception("Failed to modify the LDAP configuration.")
                self.module.fail_json(msg="Failed to modify LDAP configuration! Array Id [%s]. Error [%s]."
                                          % (self.ssid, to_native(err)))

        return msg, update

    def clear_single_configuration(self, identifier=None):
        """Remove one LDAP domain configuration (defaults to self.identifier).

        :return: (message, changed)
        """
        if identifier is None:
            identifier = self.identifier

        configuration = self.get_configuration(identifier)
        updated = False
        msg = self.NO_CHANGE_MSG
        if configuration:
            updated = True
            msg = "The LDAP domain configuration for [%s] was cleared." % identifier
            if not self.check_mode:
                try:
                    (rc, result) = request(self.url + self.base_path + '%s' % identifier, method='DELETE', **self.creds)
                except Exception as err:
                    self.module.fail_json(msg="Failed to remove LDAP configuration! Array Id [%s]. Error [%s]."
                                              % (self.ssid, to_native(err)))
        return msg, updated

    def clear_configuration(self):
        """Remove all LDAP domain configurations.

        :return: (message, changed)
        """
        configuration = self.get_full_configuration()
        updated = False
        msg = self.NO_CHANGE_MSG
        if configuration['ldapDomains']:
            updated = True
            msg = "The LDAP configuration for all domains was cleared."
            if not self.check_mode:
                try:
                    (rc, result) = request(self.url + self.base_path, method='DELETE', ignore_errors=True, **self.creds)

                    # Older versions of the NetApp E-Series REST API do not provide an
                    # endpoint to remove all existing configs, so fall back to
                    # deleting each domain individually.
                    if rc == 405:
                        for config in configuration['ldapDomains']:
                            self.clear_single_configuration(config['id'])

                except Exception as err:
                    self.module.fail_json(msg="Failed to clear LDAP configuration! Array Id [%s]. Error [%s]."
                                              % (self.ssid, to_native(err)))
        return msg, updated

    def get_base_path(self):
        """Return the ldap REST path for the embedded or proxy implementation."""
        embedded = self.is_embedded()
        if embedded:
            return 'storage-systems/%s/ldap/' % self.ssid
        else:
            # NOTE(review): leading slash combined with a url that already ends
            # in '/' yields a double slash when concatenated — confirm the proxy
            # accepts this.
            return '/ldap/'

    def update(self):
        """Dispatch to create/update or clear operations and exit the module."""
        self.base_path = self.get_base_path()

        if self.ldap:
            msg, update = self.update_configuration()
        elif self.identifier:
            msg, update = self.clear_single_configuration()
        else:
            msg, update = self.clear_configuration()
        self.module.exit_json(msg=msg, changed=update, )

    def __call__(self, *args, **kwargs):
        """Make the handler callable; any arguments are accepted and ignored."""
        self.update()
+
+
def main():
    """Module entry point: build the LDAP settings handler and run it."""
    Ldap()()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_lun_mapping.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_lun_mapping.py
new file mode 100644
index 00000000..1b190ad3
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_lun_mapping.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_lun_mapping
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+short_description: NetApp E-Series create, delete, or modify lun mappings
+description:
+ - Create, delete, or modify mappings between a volume and a targeted host/host+ group.
+version_added: "2.2"
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ description:
+ - Present will ensure the mapping exists, absent will remove the mapping.
+ required: True
+ type: str
+ choices: ["present", "absent"]
+ target:
+ description:
+ - The name of host or hostgroup you wish to assign to the mapping
+ - If omitted, the default hostgroup is used.
+ - If the supplied I(volume_name) is associated with a different target, it will be updated to what is supplied here.
+ type: str
+ required: False
+ volume_name:
+ description:
+ - The name of the volume you wish to include in the mapping.
+ required: True
+ type: str
+ aliases:
+ - volume
+ lun:
+ description:
+ - The LUN value you wish to give the mapping.
+ - If the supplied I(volume_name) is associated with a different LUN, it will be updated to what is supplied here.
+      - LUN value will be determined by the storage-system when not specified.
+ version_added: 2.7
+ type: int
+ required: no
+ target_type:
+ description:
+      - This option specifies whether the target should be a host or a group of hosts
+ - Only necessary when the target name is used for both a host and a group of hosts
+ choices:
+ - host
+ - group
+ version_added: 2.7
+ type: str
+ required: no
+'''
+
+EXAMPLES = '''
+---
+ - name: Map volume1 to the host target host1
+ netapp_e_lun_mapping:
+ ssid: 1
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: no
+ state: present
+ target: host1
+ volume: volume1
+ - name: Delete the lun mapping between volume1 and host1
+ netapp_e_lun_mapping:
+ ssid: 1
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: yes
+ state: absent
+ target: host1
+ volume: volume1
+'''
+RETURN = '''
+msg:
+ description: success of the module
+ returned: always
+ type: str
+  sample: Lun mapping is complete.
+'''
+import json
+import logging
+from pprint import pformat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+# Standard JSON request/response headers for the SANtricity REST API.
+HEADERS = {
+    "Content-Type": "application/json",
+    "Accept": "application/json"
+}
+
+
+class LunMapping(object):
+ def __init__(self):
+ argument_spec = eseries_host_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=True, choices=["present", "absent"]),
+ target=dict(required=False, default=None),
+ volume_name=dict(required=True, aliases=["volume"]),
+ lun=dict(type="int", required=False),
+ target_type=dict(required=False, choices=["host", "group"])))
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+ args = self.module.params
+
+ self.state = args["state"] in ["present"]
+ self.target = args["target"]
+ self.volume = args["volume_name"]
+ self.lun = args["lun"]
+ self.target_type = args["target_type"]
+ self.ssid = args["ssid"]
+ self.url = args["api_url"]
+ self.check_mode = self.module.check_mode
+ self.creds = dict(url_username=args["api_username"],
+ url_password=args["api_password"],
+ validate_certs=args["validate_certs"])
+ self.mapping_info = None
+
+ if not self.url.endswith('/'):
+ self.url += '/'
+
+ def update_mapping_info(self):
+ """Collect the current state of the storage array."""
+ response = None
+ try:
+ rc, response = request(self.url + "storage-systems/%s/graph" % self.ssid,
+ method="GET", headers=HEADERS, **self.creds)
+
+ except Exception as error:
+ self.module.fail_json(
+ msg="Failed to retrieve storage array graph. Id [%s]. Error [%s]" % (self.ssid, to_native(error)))
+
+ # Create dictionary containing host/cluster references mapped to their names
+ target_reference = {}
+ target_name = {}
+ target_type = {}
+
+ if self.target_type is None or self.target_type == "host":
+ for host in response["storagePoolBundle"]["host"]:
+ target_reference.update({host["hostRef"]: host["name"]})
+ target_name.update({host["name"]: host["hostRef"]})
+ target_type.update({host["name"]: "host"})
+
+ if self.target_type is None or self.target_type == "group":
+ for cluster in response["storagePoolBundle"]["cluster"]:
+
+ # Verify there is no ambiguity between target's type (ie host and group has the same name)
+ if self.target and self.target_type is None and cluster["name"] == self.target and \
+ self.target in target_name.keys():
+ self.module.fail_json(msg="Ambiguous target type: target name is used for both host and group"
+ " targets! Id [%s]" % self.ssid)
+
+ target_reference.update({cluster["clusterRef"]: cluster["name"]})
+ target_name.update({cluster["name"]: cluster["clusterRef"]})
+ target_type.update({cluster["name"]: "group"})
+
+ volume_reference = {}
+ volume_name = {}
+ lun_name = {}
+ for volume in response["volume"]:
+ volume_reference.update({volume["volumeRef"]: volume["name"]})
+ volume_name.update({volume["name"]: volume["volumeRef"]})
+ if volume["listOfMappings"]:
+ lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]})
+ for volume in response["highLevelVolBundle"]["thinVolume"]:
+ volume_reference.update({volume["volumeRef"]: volume["name"]})
+ volume_name.update({volume["name"]: volume["volumeRef"]})
+ if volume["listOfMappings"]:
+ lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]})
+
+ # Build current mapping object
+ self.mapping_info = dict(lun_mapping=[dict(volume_reference=mapping["volumeRef"],
+ map_reference=mapping["mapRef"],
+ lun_mapping_reference=mapping["lunMappingRef"],
+ lun=mapping["lun"]
+ ) for mapping in response["storagePoolBundle"]["lunMapping"]],
+ volume_by_reference=volume_reference,
+ volume_by_name=volume_name,
+ lun_by_name=lun_name,
+ target_by_reference=target_reference,
+ target_by_name=target_name,
+ target_type_by_name=target_type)
+
+ def get_lun_mapping(self):
+ """Find the matching lun mapping reference.
+
+ Returns: tuple(bool, int, int): contains volume match, volume mapping reference and mapping lun
+ """
+ target_match = False
+ reference = None
+ lun = None
+
+ self.update_mapping_info()
+
+ # Verify that when a lun is specified that it does not match an existing lun value unless it is associated with
+ # the specified volume (ie for an update)
+ if self.lun and any((self.lun == lun_mapping["lun"] and
+ self.target == self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] and
+ self.volume != self.mapping_info["volume_by_reference"][lun_mapping["volume_reference"]]
+ ) for lun_mapping in self.mapping_info["lun_mapping"]):
+ self.module.fail_json(msg="Option lun value is already in use for target! Array Id [%s]." % self.ssid)
+
+ # Verify that when target_type is specified then it matches the target's actually type
+ if self.target and self.target_type and self.target in self.mapping_info["target_type_by_name"].keys() and \
+ self.mapping_info["target_type_by_name"][self.target] != self.target_type:
+ self.module.fail_json(
+ msg="Option target does not match the specified target_type! Id [%s]." % self.ssid)
+
+ # Verify volume and target exist if needed for expected state.
+ if self.state:
+ if self.volume not in self.mapping_info["volume_by_name"].keys():
+ self.module.fail_json(msg="Volume does not exist. Id [%s]." % self.ssid)
+ if self.target and self.target not in self.mapping_info["target_by_name"].keys():
+ self.module.fail_json(msg="Target does not exist. Id [%s'." % self.ssid)
+
+ for lun_mapping in self.mapping_info["lun_mapping"]:
+
+ # Find matching volume reference
+ if lun_mapping["volume_reference"] == self.mapping_info["volume_by_name"][self.volume]:
+ reference = lun_mapping["lun_mapping_reference"]
+ lun = lun_mapping["lun"]
+
+ # Determine if lun mapping is attached to target with the
+ if (lun_mapping["map_reference"] in self.mapping_info["target_by_reference"].keys() and
+ self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] == self.target and
+ (self.lun is None or lun == self.lun)):
+ target_match = True
+
+ return target_match, reference, lun
+
+ def update(self):
+ """Execute the changes the require changes on the storage array."""
+ target_match, lun_reference, lun = self.get_lun_mapping()
+ update = (self.state and not target_match) or (not self.state and target_match)
+
+ if update and not self.check_mode:
+ try:
+ if self.state:
+ body = dict()
+ target = None if not self.target else self.mapping_info["target_by_name"][self.target]
+ if target:
+ body.update(dict(targetId=target))
+ if self.lun is not None:
+ body.update(dict(lun=self.lun))
+
+ if lun_reference:
+
+ rc, response = request(self.url + "storage-systems/%s/volume-mappings/%s/move"
+ % (self.ssid, lun_reference), method="POST", data=json.dumps(body),
+ headers=HEADERS, **self.creds)
+ else:
+ body.update(dict(mappableObjectId=self.mapping_info["volume_by_name"][self.volume]))
+ rc, response = request(self.url + "storage-systems/%s/volume-mappings" % self.ssid,
+ method="POST", data=json.dumps(body), headers=HEADERS, **self.creds)
+
+ else: # Remove existing lun mapping for volume and target
+ rc, response = request(self.url + "storage-systems/%s/volume-mappings/%s"
+ % (self.ssid, lun_reference),
+ method="DELETE", headers=HEADERS, **self.creds)
+ except Exception as error:
+ self.module.fail_json(
+ msg="Failed to update storage array lun mapping. Id [%s]. Error [%s]"
+ % (self.ssid, to_native(error)))
+
+ self.module.exit_json(msg="Lun mapping is complete.", changed=update)
+
+
+def main():
+ lun_mapping = LunMapping()
+ lun_mapping.update()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_mgmt_interface.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_mgmt_interface.py
new file mode 100644
index 00000000..58d133fa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_mgmt_interface.py
@@ -0,0 +1,723 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_mgmt_interface
+short_description: NetApp E-Series management interface configuration
+description:
+ - Configure the E-Series management interfaces
+version_added: '2.7'
+author:
+ - Michael Price (@lmprice)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ description:
+ - Enable or disable IPv4 network interface configuration.
+ - Either IPv4 or IPv6 must be enabled otherwise error will occur.
+ - Only required when enabling or disabling IPv4 network interface
+ choices:
+ - enable
+ - disable
+ required: no
+ type: str
+ aliases:
+ - enable_interface
+ controller:
+ description:
+ - The controller that owns the port you want to configure.
+ - Controller names are represented alphabetically, with the first controller as A,
+ the second as B, and so on.
+ - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard
+ limitation and could change in the future.
+ required: yes
+ type: str
+ choices:
+ - A
+ - B
+ name:
+ description:
+ - The port to modify the configuration for.
+ - The list of choices is not necessarily comprehensive. It depends on the number of ports
+ that are present in the system.
+ - The name represents the port number (typically from left to right on the controller),
+ beginning with a value of 1.
+ - Mutually exclusive with I(channel).
+ type: str
+ aliases:
+ - port
+ - iface
+ channel:
+ description:
+ - The port to modify the configuration for.
+ - The channel represents the port number (typically from left to right on the controller),
+ beginning with a value of 1.
+ - Mutually exclusive with I(name).
+ type: int
+ address:
+ description:
+ - The IPv4 address to assign to the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: no
+ subnet_mask:
+ description:
+ - The subnet mask to utilize for the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: no
+ gateway:
+ description:
+ - The IPv4 gateway address to utilize for the interface.
+ - Should be specified in xx.xx.xx.xx form.
+ - Mutually exclusive with I(config_method=dhcp)
+ type: str
+ required: no
+ config_method:
+ description:
+ - The configuration method type to use for network interface ports.
+ - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway).
+ choices:
+ - dhcp
+ - static
+ type: str
+ required: no
+ dns_config_method:
+ description:
+ - The configuration method type to use for DNS services.
+ - dhcp is mutually exclusive with I(dns_address), and I(dns_address_backup).
+ choices:
+ - dhcp
+ - static
+ type: str
+ required: no
+ dns_address:
+ description:
+ - Primary IPv4 DNS server address
+ type: str
+ required: no
+ dns_address_backup:
+ description:
+ - Backup IPv4 DNS server address
+ - Queried when primary DNS server fails
+ type: str
+ required: no
+ ntp_config_method:
+ description:
+ - The configuration method type to use for NTP services.
+ - disable is mutually exclusive with I(ntp_address) and I(ntp_address_backup).
+ - dhcp is mutually exclusive with I(ntp_address) and I(ntp_address_backup).
+ choices:
+ - disable
+ - dhcp
+ - static
+ type: str
+ required: no
+ ntp_address:
+ description:
+ - Primary IPv4 NTP server address
+ type: str
+ required: no
+ ntp_address_backup:
+ description:
+ - Backup IPv4 NTP server address
+ - Queried when primary NTP server fails
+ required: no
+ type: str
+ ssh:
+ type: bool
+ description:
+ - Enable ssh access to the controller for debug purposes.
+ - This is a controller-level setting.
+ - rlogin/telnet will be enabled for ancient equipment where ssh is not available.
+ required: no
+ log_path:
+ description:
+ - A local path to a file to be used for debug logging
+ type: str
+ required: no
+notes:
+ - Check mode is supported.
+ - The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address
+ via dhcp, etc), can take seconds or minutes longer to take effect.
+ - "Known issue: Changes specifically to down ports will result in a failure. However, this may not be the case in up
+ coming NetApp E-Series firmware releases (released after firmware version 11.40.2)."
+"""
+
+EXAMPLES = """
+ - name: Configure the first port on the A controller with a static IPv4 address
+ netapp_e_mgmt_interface:
+ channel: 1
+ controller: "A"
+ config_method: static
+ address: "192.168.1.100"
+ subnet_mask: "255.255.255.0"
+ gateway: "192.168.1.1"
+ ssid: "1"
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+
+ - name: Disable ipv4 connectivity for the second port on the B controller
+ netapp_e_mgmt_interface:
+ channel: 2
+ controller: "B"
+ enable_interface: no
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+
+ - name: Enable ssh access for ports one and two on controller A
+ netapp_e_mgmt_interface:
+      channel: "{{ item }}"
+ controller: "A"
+ ssh: yes
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ loop:
+ - 1
+ - 2
+
+ - name: Configure static DNS settings for the first port on controller A
+ netapp_e_mgmt_interface:
+ channel: 1
+ controller: "A"
+ dns_config_method: static
+ dns_address: "192.168.1.100"
+ dns_address_backup: "192.168.1.1"
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+
+ - name: Configure static NTP settings for ports one and two on controller B
+ netapp_e_mgmt_interface:
+      channel: "{{ item }}"
+ controller: "B"
+ ntp_config_method: static
+ ntp_address: "129.100.1.100"
+ ntp_address_backup: "127.100.1.1"
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ loop:
+ - 1
+ - 2
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The interface settings have been updated.
+enabled:
+ description:
+ - Indicates whether IPv4 connectivity has been enabled or disabled.
+ - This does not necessarily indicate connectivity. If dhcp was enabled absent a dhcp server, for instance,
+ it is unlikely that the configuration will actually be valid.
+ returned: on success
+ sample: True
+ type: bool
+"""
+import json
+import logging
+from pprint import pformat, pprint
+import time
+import socket
+
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+# Standard JSON request/response headers for the SANtricity REST API.
+HEADERS = {
+    "Content-Type": "application/json",
+    "Accept": "application/json",
+}
+
+
+class MgmtInterface(object):
+    """Configure a single E-Series controller management (ethernet) port.
+
+    Handles IPv4 enable/disable, static/DHCP addressing, DNS and NTP
+    acquisition settings, and remote ssh access for one controller port.
+    """
+
+    # Upper bound on check_health() retries (5s apart) while the array recovers.
+    MAX_RETRIES = 15
+
+    def __init__(self):
+        """Define the argument spec, parse module parameters and set up logging."""
+        argument_spec = eseries_host_argument_spec()
+        argument_spec.update(dict(
+            state=dict(type="str", choices=["enable", "disable"],
+                       aliases=["enable_interface"], required=False),
+            controller=dict(type="str", required=True, choices=["A", "B"]),
+            name=dict(type="str", aliases=["port", "iface"]),
+            channel=dict(type="int"),
+            address=dict(type="str", required=False),
+            subnet_mask=dict(type="str", required=False),
+            gateway=dict(type="str", required=False),
+            config_method=dict(type="str", required=False, choices=["dhcp", "static"]),
+            dns_config_method=dict(type="str", required=False, choices=["dhcp", "static"]),
+            dns_address=dict(type="str", required=False),
+            dns_address_backup=dict(type="str", required=False),
+            ntp_config_method=dict(type="str", required=False, choices=["disable", "dhcp", "static"]),
+            ntp_address=dict(type="str", required=False),
+            ntp_address_backup=dict(type="str", required=False),
+            ssh=dict(type="bool", required=False),
+            log_path=dict(type="str", required=False),
+        ))
+
+        required_if = [
+            ["state", "enable", ["config_method"]],
+            ["config_method", "static", ["address", "subnet_mask"]],
+            ["dns_config_method", "static", ["dns_address"]],
+            ["ntp_config_method", "static", ["ntp_address"]],
+        ]
+
+        mutually_exclusive = [
+            ["name", "channel"],
+        ]
+
+        self.module = AnsibleModule(argument_spec=argument_spec,
+                                    supports_check_mode=True,
+                                    required_if=required_if,
+                                    mutually_exclusive=mutually_exclusive)
+        args = self.module.params
+
+        self.controller = args["controller"]
+        self.name = args["name"]
+        self.channel = args["channel"]
+
+        self.config_method = args["config_method"]
+        self.address = args["address"]
+        self.subnet_mask = args["subnet_mask"]
+        self.gateway = args["gateway"]
+        # None means "leave IPv4 enablement untouched"; True/False for enable/disable.
+        self.enable_interface = None if args["state"] is None else args["state"] == "enable"
+
+        self.dns_config_method = args["dns_config_method"]
+        self.dns_address = args["dns_address"]
+        self.dns_address_backup = args["dns_address_backup"]
+
+        self.ntp_config_method = args["ntp_config_method"]
+        self.ntp_address = args["ntp_address"]
+        self.ntp_address_backup = args["ntp_address_backup"]
+
+        self.ssh = args["ssh"]
+
+        self.ssid = args["ssid"]
+        self.url = args["api_url"]
+        self.creds = dict(url_password=args["api_password"],
+                          validate_certs=args["validate_certs"],
+                          url_username=args["api_username"], )
+
+        # Retry counter shared by check_health().
+        self.retries = 0
+
+        self.check_mode = self.module.check_mode
+        self.post_body = dict()
+
+        log_path = args["log_path"]
+
+        # logging setup
+        self._logger = logging.getLogger(self.__class__.__name__)
+
+        if log_path:
+            logging.basicConfig(
+                level=logging.DEBUG, filename=log_path, filemode='w',
+                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+        if not self.url.endswith('/'):
+            self.url += '/'
+
+    @property
+    def controllers(self):
+        """Retrieve a mapping of controller labels to their references
+        {
+            'A': '070000000000000000000001',
+            'B': '070000000000000000000002',
+        }
+        :return: the controllers defined on the system
+        """
+        try:
+            (rc, controllers) = request(self.url + 'storage-systems/%s/controllers'
+                                        % self.ssid, headers=HEADERS, **self.creds)
+        except Exception as err:
+            controllers = list()
+            self.module.fail_json(
+                msg="Failed to retrieve the controller settings. Array Id [%s]. Error [%s]."
+                    % (self.ssid, to_native(err)))
+
+        # Controllers are labeled alphabetically (A, B, ...) by physical slot order.
+        controllers.sort(key=lambda c: c['physicalLocation']['slot'])
+
+        controllers_dict = dict()
+        i = ord('A')
+        for controller in controllers:
+            label = chr(i)
+            settings = dict(controllerSlot=controller['physicalLocation']['slot'],
+                            controllerRef=controller['controllerRef'],
+                            ssh=controller['networkSettings']['remoteAccessEnabled'])
+            controllers_dict[label] = settings
+            i += 1
+
+        return controllers_dict
+
+    @property
+    def interface(self):
+        """Return a normalized dict describing the requested port on the chosen controller.
+
+        The port is selected by I(name) (alias/interface name) or I(channel);
+        fails the module when no matching interface is found.
+        """
+        net_interfaces = list()
+        try:
+            (rc, net_interfaces) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces'
+                                           % self.ssid, headers=HEADERS, **self.creds)
+        except Exception as err:
+            self.module.fail_json(
+                msg="Failed to retrieve defined management interfaces. Array Id [%s]. Error [%s]."
+                    % (self.ssid, to_native(err)))
+
+        controllers = self.controllers
+        controller = controllers[self.controller]
+
+        # Restrict to interfaces owned by the requested controller.
+        net_interfaces = [iface for iface in net_interfaces if iface["controllerRef"] == controller["controllerRef"]]
+
+        # Find the correct interface
+        iface = None
+        for net in net_interfaces:
+            if self.name:
+                if net["alias"] == self.name or net["interfaceName"] == self.name:
+                    iface = net
+                    break
+            elif self.channel:
+                if net["channel"] == self.channel:
+                    iface = net
+                    break
+
+        if iface is None:
+            identifier = self.name if self.name is not None else self.channel
+            self.module.fail_json(msg="We could not find an interface matching [%s] on Array=[%s]."
+                                      % (identifier, self.ssid))
+
+        return dict(alias=iface["alias"],
+                    channel=iface["channel"],
+                    link_status=iface["linkStatus"],
+                    enabled=iface["ipv4Enabled"],
+                    address=iface["ipv4Address"],
+                    gateway=iface["ipv4GatewayAddress"],
+                    subnet_mask=iface["ipv4SubnetMask"],
+                    dns_config_method=iface["dnsProperties"]["acquisitionProperties"]["dnsAcquisitionType"],
+                    dns_servers=iface["dnsProperties"]["acquisitionProperties"]["dnsServers"],
+                    ntp_config_method=iface["ntpProperties"]["acquisitionProperties"]["ntpAcquisitionType"],
+                    ntp_servers=iface["ntpProperties"]["acquisitionProperties"]["ntpServers"],
+                    config_method=iface["ipv4AddressConfigMethod"],
+                    controllerRef=iface["controllerRef"],
+                    controllerSlot=iface["controllerSlot"],
+                    ipv6Enabled=iface["ipv6Enabled"],
+                    id=iface["interfaceRef"], )
+
+    def get_enable_interface_settings(self, iface, expected_iface, update, body):
+        """Enable or disable the IPv4 network interface."""
+        if self.enable_interface:
+            if not iface["enabled"]:
+                update = True
+            body["ipv4Enabled"] = True
+        else:
+            if iface["enabled"]:
+                update = True
+            body["ipv4Enabled"] = False
+
+        expected_iface["enabled"] = body["ipv4Enabled"]
+        return update, expected_iface, body
+
+    def get_interface_settings(self, iface, expected_iface, update, body):
+        """Update network interface settings."""
+
+        if self.config_method == "dhcp":
+            if iface["config_method"] != "configDhcp":
+                update = True
+                body["ipv4AddressConfigMethod"] = "configDhcp"
+
+        else:
+            if iface["config_method"] != "configStatic":
+                update = True
+                body["ipv4AddressConfigMethod"] = "configStatic"
+
+            if iface["address"] != self.address:
+                update = True
+                body["ipv4Address"] = self.address
+
+            if iface["subnet_mask"] != self.subnet_mask:
+                update = True
+                body["ipv4SubnetMask"] = self.subnet_mask
+
+            if self.gateway and iface["gateway"] != self.gateway:
+                update = True
+                body["ipv4GatewayAddress"] = self.gateway
+
+            # NOTE(review): these reads raise KeyError when the corresponding value was
+            # unchanged above (body key never set) — confirm whether the no-change path
+            # is reachable in practice or needs .setdefault() guards.
+            expected_iface["address"] = body["ipv4Address"]
+            expected_iface["subnet_mask"] = body["ipv4SubnetMask"]
+            expected_iface["gateway"] = body["ipv4GatewayAddress"]
+
+        expected_iface["config_method"] = body["ipv4AddressConfigMethod"]
+
+        return update, expected_iface, body
+
+    def get_dns_server_settings(self, iface, expected_iface, update, body):
+        """Add DNS server information to the request body."""
+        if self.dns_config_method == "dhcp":
+            if iface["dns_config_method"] != "dhcp":
+                update = True
+                body["dnsAcquisitionDescriptor"] = dict(dnsAcquisitionType="dhcp")
+
+        elif self.dns_config_method == "static":
+            dns_servers = [dict(addressType="ipv4", ipv4Address=self.dns_address)]
+            if self.dns_address_backup:
+                dns_servers.append(dict(addressType="ipv4", ipv4Address=self.dns_address_backup))
+
+            # NOTE(review): "stat" appears to be the REST enum for static acquisition —
+            # confirm against the SANtricity API reference.
+            body["dnsAcquisitionDescriptor"] = dict(dnsAcquisitionType="stat", dnsServers=dns_servers)
+
+            if (iface["dns_config_method"] != "stat" or
+                    len(iface["dns_servers"]) != len(dns_servers) or
+                    (len(iface["dns_servers"]) == 2 and
+                     (iface["dns_servers"][0]["ipv4Address"] != self.dns_address or
+                      iface["dns_servers"][1]["ipv4Address"] != self.dns_address_backup)) or
+                    (len(iface["dns_servers"]) == 1 and
+                     iface["dns_servers"][0]["ipv4Address"] != self.dns_address)):
+                update = True
+
+            expected_iface["dns_servers"] = dns_servers
+
+        # NOTE(review): raises KeyError when dns_config_method is "dhcp" and the
+        # interface is already dhcp (descriptor never added to body) — verify.
+        expected_iface["dns_config_method"] = body["dnsAcquisitionDescriptor"]["dnsAcquisitionType"]
+        return update, expected_iface, body
+
+    def get_ntp_server_settings(self, iface, expected_iface, update, body):
+        """Add NTP server information to the request body."""
+        if self.ntp_config_method == "disable":
+            if iface["ntp_config_method"] != "disabled":
+                update = True
+                body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="disabled")
+
+        elif self.ntp_config_method == "dhcp":
+            if iface["ntp_config_method"] != "dhcp":
+                update = True
+                body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="dhcp")
+
+        elif self.ntp_config_method == "static":
+            ntp_servers = [dict(addrType="ipvx", ipvxAddress=dict(addressType="ipv4", ipv4Address=self.ntp_address))]
+            if self.ntp_address_backup:
+                ntp_servers.append(dict(addrType="ipvx",
+                                        ipvxAddress=dict(addressType="ipv4", ipv4Address=self.ntp_address_backup)))
+
+            body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="stat", ntpServers=ntp_servers)
+
+            if (iface["ntp_config_method"] != "stat" or
+                    len(iface["ntp_servers"]) != len(ntp_servers) or
+                    ((len(iface["ntp_servers"]) == 2 and
+                      (iface["ntp_servers"][0]["ipvxAddress"]["ipv4Address"] != self.ntp_address or
+                       iface["ntp_servers"][1]["ipvxAddress"]["ipv4Address"] != self.ntp_address_backup)) or
+                     (len(iface["ntp_servers"]) == 1 and
+                      iface["ntp_servers"][0]["ipvxAddress"]["ipv4Address"] != self.ntp_address))):
+                update = True
+
+            expected_iface["ntp_servers"] = ntp_servers
+
+        # NOTE(review): same latent KeyError as the DNS case when no change was staged — verify.
+        expected_iface["ntp_config_method"] = body["ntpAcquisitionDescriptor"]["ntpAcquisitionType"]
+        return update, expected_iface, body
+
+    def get_remote_ssh_settings(self, settings, update, body):
+        """Configure network interface ports for remote ssh access."""
+        if self.ssh != settings["ssh"]:
+            update = True
+
+        body["enableRemoteAccess"] = self.ssh
+        return update, body
+
+    def update_array(self, settings, iface):
+        """Update controller with new interface, dns service, ntp service and/or remote ssh access information.
+
+        :returns: whether information passed will modify the controller's current state
+        :rtype: bool
+        """
+        update = False
+        body = dict(controllerRef=settings['controllerRef'],
+                    interfaceRef=iface['id'])
+        expected_iface = iface.copy()
+
+        # Check if api url is using the affected management interface to change itself
+        update_used_matching_address = False
+        if self.enable_interface and self.config_method:
+            netloc = list(urlparse.urlparse(self.url))[1]
+            address = netloc.split(":")[0]
+            address_info = socket.getaddrinfo(address, 8443)
+            url_address_info = socket.getaddrinfo(iface["address"], 8443)
+            update_used_matching_address = any(info in url_address_info for info in address_info)
+
+        self._logger.info("update_used_matching_address: %s", update_used_matching_address)
+
+        # Populate the body of the request and check for changes
+        if self.enable_interface is not None:
+            update, expected_iface, body = self.get_enable_interface_settings(iface, expected_iface, update, body)
+
+        if self.config_method is not None:
+            update, expected_iface, body = self.get_interface_settings(iface, expected_iface, update, body)
+
+        if self.dns_config_method is not None:
+            update, expected_iface, body = self.get_dns_server_settings(iface, expected_iface, update, body)
+
+        if self.ntp_config_method is not None:
+            update, expected_iface, body = self.get_ntp_server_settings(iface, expected_iface, update, body)
+
+        if self.ssh is not None:
+            update, body = self.get_remote_ssh_settings(settings, update, body)
+            iface["ssh"] = self.ssh
+            expected_iface["ssh"] = self.ssh
+
+        # debug information
+        self._logger.info(pformat(body))
+        self._logger.info(pformat(iface))
+        self._logger.info(pformat(expected_iface))
+
+        # Check mode: report whether a change would occur without issuing any request.
+        if self.check_mode:
+            return update
+
+        # NOTE(review): "not self.check_mode" is always true here because of the early
+        # return above — redundant but harmless.
+        if update and not self.check_mode:
+            if not update_used_matching_address:
+                try:
+                    (rc, data) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces'
+                                         % self.ssid, method='POST', data=json.dumps(body), headers=HEADERS,
+                                         timeout=300, ignore_errors=True, **self.creds)
+                    if rc == 422:
+                        if data['retcode'] == "4" or data['retcode'] == "illegalParam":
+                            if not (body['ipv4Enabled'] or iface['ipv6Enabled']):
+                                self.module.fail_json(msg="This storage-system already has IPv6 connectivity disabled. "
+                                                          "DHCP configuration for IPv4 is required at a minimum."
+                                                          " Array Id [%s] Message [%s]."
+                                                          % (self.ssid, data['errorMessage']))
+                            else:
+                                self.module.fail_json(msg="We failed to configure the management interface. Array Id "
+                                                          "[%s] Message [%s]." % (self.ssid, data))
+                    elif rc >= 300:
+                        self.module.fail_json(
+                            msg="We failed to configure the management interface. Array Id [%s] Message [%s]." %
+                                (self.ssid, data))
+
+                # This is going to catch cases like a connection failure
+                except Exception as err:
+                    self.module.fail_json(
+                        msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]."
+                            % (self.ssid, to_native(err)))
+            else:
+                # The request itself will change the address we are talking to; use the
+                # special re-probing path instead of a plain POST.
+                self.update_api_address_interface_match(body)
+
+        # NOTE(review): changes are re-validated only when the port's link is NOT up —
+        # confirm this condition is intentional (it mirrors upstream behavior).
+        return self.validate_changes(expected_iface) if update and iface["link_status"] != "up" else update
+
+    def update_api_address_interface_match(self, body):
+        """Change network interface address which matches the api_address"""
+        try:
+            try:
+                # This POST is expected to drop the connection mid-flight since it
+                # reconfigures the very interface being used; a short timeout is deliberate.
+                (rc, data) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces' % self.ssid,
+                                     use_proxy=False, force=True, ignore_errors=True, method='POST',
+                                     data=json.dumps(body), headers=HEADERS, timeout=10, **self.creds)
+            except Exception:
+                # Re-probe the API at the newly assigned address to confirm it came back up.
+                url_parts = list(urlparse.urlparse(self.url))
+                domain = url_parts[1].split(":")
+                domain[0] = self.address
+                url_parts[1] = ":".join(domain)
+                expected_url = urlparse.urlunparse(url_parts)
+                self._logger.info(pformat(expected_url))
+
+                (rc, data) = request(expected_url + 'storage-systems/%s/configuration/ethernet-interfaces' % self.ssid,
+                                     headers=HEADERS, timeout=300, **self.creds)
+                return
+        except Exception as err:
+            self._logger.info(type(err))
+            self.module.fail_json(
+                msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]."
+                    % (self.ssid, to_native(err)))
+
+    def validate_changes(self, expected_iface, retry=6):
+        """Validate interface changes were applied to the controller interface port. 30 second timeout"""
+        if self.interface != expected_iface:
+            time.sleep(5)
+            if retry:
+                # Recursive retry: up to (retry+1) polls, 5 seconds apart.
+                return self.validate_changes(expected_iface, retry - 1)
+
+            self.module.fail_json(msg="Update failure: we failed to verify the necessary state change.")
+
+        return True
+
+    def check_health(self):
+        """It's possible, due to a previous operation, for the API to report a 424 (offline) status for the
+        storage-system. Therefore, we run a manual check with retries to attempt to contact the system before we
+        continue.
+        """
+        try:
+            (rc, data) = request(self.url + 'storage-systems/%s/controllers'
+                                 % self.ssid, headers=HEADERS,
+                                 ignore_errors=True, **self.creds)
+
+            # We've probably recently changed the interface settings and it's still coming back up: retry.
+            if rc == 424:
+                if self.retries < self.MAX_RETRIES:
+                    self.retries += 1
+                    self._logger.info("We hit a 424, retrying in 5s.")
+                    time.sleep(5)
+                    self.check_health()
+                else:
+                    self.module.fail_json(
+                        msg="We failed to pull storage-system information. Array Id [%s] Message [%s]." %
+                            (self.ssid, data))
+            elif rc >= 300:
+                self.module.fail_json(
+                    msg="We failed to pull storage-system information. Array Id [%s] Message [%s]." %
+                        (self.ssid, data))
+        # This is going to catch cases like a connection failure
+        except Exception as err:
+            if self.retries < self.MAX_RETRIES:
+                self._logger.info("We hit a connection failure, retrying in 5s.")
+                self.retries += 1
+                time.sleep(5)
+                self.check_health()
+            else:
+                self.module.fail_json(
+                    msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]."
+                        % (self.ssid, to_native(err)))
+
+    def update(self):
+        """Update storage system with necessary changes."""
+        # Check if the storage array can be contacted
+        self.check_health()
+
+        # make the necessary changes to the storage system
+        settings = self.controllers[self.controller]
+        iface = self.interface
+        self._logger.info(pformat(settings))
+        self._logger.info(pformat(iface))
+        update = self.update_array(settings, iface)
+
+        self.module.exit_json(msg="The interface settings have been updated.", changed=update)
+
+    def __call__(self, *args, **kwargs):
+        """Allow the instance to be invoked directly; delegates to update()."""
+        self.update()
+
+
+def main():
+ iface = MgmtInterface()
+ iface()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_group.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_group.py
new file mode 100644
index 00000000..8bcee43f
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_group.py
@@ -0,0 +1,376 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = """
+---
+module: netapp_e_snapshot_group
+short_description: NetApp E-Series manage snapshot groups
+description:
+ - Create, update, delete snapshot groups for NetApp E-series storage arrays
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ ssid:
+ description:
+ - Storage system identifier
+ type: str
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ state:
+ description:
+ - Whether to ensure the group is present or absent.
+ required: True
+ type: str
+ choices:
+ - present
+ - absent
+ name:
+ description:
+ - The name to give the snapshot group
+ type: str
+ required: True
+ base_volume_name:
+ description:
+ - The name of the base volume or thin volume to use as the base for the new snapshot group.
+ - If a snapshot group with an identical C(name) already exists but with a different base volume
+ an error will be returned.
+ type: str
+ required: True
+ repo_pct:
+ description:
+ - The size of the repository in relation to the size of the base volume
+ required: False
+ type: int
+ default: 20
+ warning_threshold:
+ description:
+ - The repository utilization warning threshold, as a percentage of the repository volume capacity.
+ required: False
+ type: int
+ default: 80
+ delete_limit:
+ description:
+ - The automatic deletion indicator.
+ - If non-zero, the oldest snapshot image will be automatically deleted when creating a new snapshot image to keep the total number of
+ snapshot images limited to the number specified.
+ - This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
+ required: False
+ type: int
+ default: 30
+ full_policy:
+ description:
+ - The behavior on when the data repository becomes full.
+ - This value is overridden by consistency group setting if this snapshot group is associated with a consistency group
+ required: False
+ default: purgepit
+ type: str
+ choices: ['unknown', 'failbasewrites', 'purgepit']
+ storage_pool_name:
+ required: True
+ description:
+ - The name of the storage pool on which to allocate the repository volume.
+ type: str
+ rollback_priority:
+ required: False
+ description:
+ - The importance of the rollback operation.
+ - This value is overridden by consistency group setting if this snapshot group is associated with a consistency group
+ choices: ['highest', 'high', 'medium', 'low', 'lowest']
+ type: str
+ default: medium
+"""
+
+EXAMPLES = """
+ - name: Configure Snapshot group
+ netapp_e_snapshot_group:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ base_volume_name: SSGroup_test
        name: OOSS_Group
+ repo_pct: 20
+ warning_threshold: 85
+ delete_limit: 30
+ full_policy: purgepit
+ storage_pool_name: Disk_Pool_1
+ rollback_priority: medium
+"""
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: str
+ sample: json facts for newly created snapshot group.
+"""
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP(S) request and return ``(status_code, parsed_json)``.

    HTTP error responses are read just like successful ones; any status
    >= 400 raises ``Exception(status, body)`` unless ``ignore_errors`` is set.
    When the response body is empty, the caller's ``data`` argument is echoed
    back unchanged (historical behaviour preserved).
    """
    try:
        response = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                            force=force, last_mod_time=last_mod_time, timeout=timeout,
                            validate_certs=validate_certs, url_username=url_username,
                            url_password=url_password, http_agent=http_agent,
                            force_basic_auth=force_basic_auth)
    except HTTPError as err:
        # The error response still carries a readable body; treat it as the response.
        response = err.fp

    try:
        body = response.read()
        if body:
            data = json.loads(body)
        else:
            body = None
    except Exception:
        if not ignore_errors:
            raise Exception(body)

    status = response.getcode()
    if status >= 400 and not ignore_errors:
        raise Exception(status, data)
    return status, data
+
+
class SnapshotGroup(object):
    """Manage an E-Series snapshot group: create, update and delete."""

    def __init__(self):

        argument_spec = basic_auth_argument_spec()
        argument_spec.update(
            api_username=dict(type='str', required=True),
            api_password=dict(type='str', required=True, no_log=True),
            api_url=dict(type='str', required=True),
            state=dict(required=True, choices=['present', 'absent']),
            base_volume_name=dict(required=True),
            name=dict(required=True),
            repo_pct=dict(default=20, type='int'),
            warning_threshold=dict(default=80, type='int'),
            delete_limit=dict(default=30, type='int'),
            full_policy=dict(default='purgepit', choices=['unknown', 'failbasewrites', 'purgepit']),
            rollback_priority=dict(default='medium', choices=['highest', 'high', 'medium', 'low', 'lowest']),
            storage_pool_name=dict(type='str'),
            ssid=dict(required=True),
        )

        self.module = AnsibleModule(argument_spec=argument_spec)

        self.post_data = dict()
        self.warning_threshold = self.module.params['warning_threshold']
        self.base_volume_name = self.module.params['base_volume_name']
        self.name = self.module.params['name']
        self.repo_pct = self.module.params['repo_pct']
        self.delete_limit = self.module.params['delete_limit']
        self.full_policy = self.module.params['full_policy']
        self.rollback_priority = self.module.params['rollback_priority']
        self.storage_pool_name = self.module.params['storage_pool_name']
        self.state = self.module.params['state']

        self.url = self.module.params['api_url']
        self.user = self.module.params['api_username']
        self.pwd = self.module.params['api_password']
        self.certs = self.module.params['validate_certs']
        self.ssid = self.module.params['ssid']

        # Normalize the endpoint so relative paths can be appended directly.
        if not self.url.endswith('/'):
            self.url += '/'

        self.changed = False

    @property
    def pool_id(self):
        """Return the id of the storage pool named ``storage_pool_name``.

        Caches the full pool record on ``self.pool_data``. Fails the module
        when the pool cannot be fetched or does not exist.
        """
        pools = 'storage-systems/%s/storage-pools' % self.ssid
        url = self.url + pools
        try:
            # Bug fix: the original ignored the user's validate_certs setting here.
            (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
                                 validate_certs=self.certs)
        except Exception as err:
            self.module.fail_json(msg="Snapshot group module - Failed to fetch storage pools. " +
                                      "Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

        for pool in data:
            if pool['name'] == self.storage_pool_name:
                self.pool_data = pool
                return pool['id']

        # Bug fix: the original message reported the snapshot group name
        # (self.name) instead of the storage pool name that was not found.
        self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.storage_pool_name)

    @property
    def volume_id(self):
        """Return the id of the base volume, failing on zero or duplicate matches.

        Caches the full volume record on ``self.volume``.
        """
        volumes = 'storage-systems/%s/volumes' % self.ssid
        url = self.url + volumes
        try:
            rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
                               validate_certs=self.certs)
        except Exception as err:
            self.module.fail_json(msg="Snapshot group module - Failed to fetch volumes. " +
                                      "Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

        # Bug fix: the original captured the *last* volume in the listing
        # rather than the one whose name matched base_volume_name.
        matches = [volume for volume in data if volume['name'] == self.base_volume_name]

        if len(matches) > 1:
            self.module.fail_json(msg="More than one volume with the name: %s was found, "
                                      "please ensure your volume has a unique name" % self.base_volume_name)
        elif not matches:
            self.module.fail_json(msg="No volume with the name: %s, was found" % self.base_volume_name)

        self.volume = matches[0]
        return matches[0]['id']

    @property
    def snapshot_group_id(self):
        """Return the id of the snapshot group named ``self.name``, or None.

        Caches the full group record on ``self.ssg_data`` when found.
        """
        url = self.url + 'storage-systems/%s/snapshot-groups' % self.ssid
        try:
            rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
                               validate_certs=self.certs)
        except Exception as err:
            self.module.fail_json(msg="Failed to fetch snapshot groups. " +
                                      "Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
        for ssg in data:
            if ssg['name'] == self.name:
                self.ssg_data = ssg
                return ssg['id']

        return None

    @property
    def ssg_needs_update(self):
        """Whether the cached group record differs from the requested settings."""
        if self.ssg_data['fullWarnThreshold'] != self.warning_threshold or \
                self.ssg_data['autoDeleteLimit'] != self.delete_limit or \
                self.ssg_data['repFullPolicy'] != self.full_policy or \
                self.ssg_data['rollbackPriority'] != self.rollback_priority:
            return True
        else:
            return False

    def create_snapshot_group(self):
        """Create the snapshot group, then correct any settings the POST ignored."""
        self.post_data = dict(
            baseMappableObjectId=self.volume_id,
            name=self.name,
            repositoryPercentage=self.repo_pct,
            warningThreshold=self.warning_threshold,
            autoDeleteLimit=self.delete_limit,
            fullPolicy=self.full_policy,
            storagePoolId=self.pool_id,
        )
        snapshot = 'storage-systems/%s/snapshot-groups' % self.ssid
        url = self.url + snapshot
        try:
            rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
                                        url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
        except Exception as err:
            self.module.fail_json(msg="Failed to create snapshot group. " +
                                      "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
                                                                                     self.ssid,
                                                                                     to_native(err)))

        # Bug fix: the original attempted ``self.snapshot_group_id = ...`` on a
        # read-only property, which raises AttributeError. The property looks
        # the group up by name on access, so no caching is required here.

        # The create call does not always honor every setting, so verify and
        # patch the group if necessary before reporting success.
        if self.ssg_needs_update:
            self.update_ssg()
        else:
            self.module.exit_json(changed=True, **self.ssg_data)

    def update_ssg(self):
        """POST the adjustable settings to an existing snapshot group."""
        self.post_data = dict(
            warningThreshold=self.warning_threshold,
            autoDeleteLimit=self.delete_limit,
            fullPolicy=self.full_policy,
            rollbackPriority=self.rollback_priority
        )

        url = self.url + "storage-systems/%s/snapshot-groups/%s" % (self.ssid, self.snapshot_group_id)
        try:
            rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
                                        url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
        except Exception as err:
            self.module.fail_json(msg="Failed to update snapshot group. " +
                                      "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
                                                                                     self.ssid,
                                                                                     to_native(err)))

    def apply(self):
        """Reconcile the requested state with the array and exit the module."""
        if self.state == 'absent':
            if self.snapshot_group_id:
                try:
                    rc, resp = request(
                        self.url + 'storage-systems/%s/snapshot-groups/%s' % (self.ssid, self.snapshot_group_id),
                        method='DELETE', headers=HEADERS, url_password=self.pwd, url_username=self.user,
                        validate_certs=self.certs)
                except Exception as err:
                    self.module.fail_json(msg="Failed to delete snapshot group. " +
                                              "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
                                                                                             self.ssid,
                                                                                             to_native(err)))
                self.module.exit_json(changed=True, msg="Snapshot group removed", **self.ssg_data)
            else:
                self.module.exit_json(changed=False, msg="Snapshot group absent")

        elif self.snapshot_group_id:
            if self.ssg_needs_update:
                self.update_ssg()
                self.module.exit_json(changed=True, **self.ssg_data)
            else:
                self.module.exit_json(changed=False, **self.ssg_data)
        else:
            self.create_snapshot_group()
+
+
def main():
    """Module entry point: build the snapshot-group manager and apply state."""
    SnapshotGroup().apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_images.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_images.py
new file mode 100644
index 00000000..f0ea8fb6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_images.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = """
+---
+module: netapp_e_snapshot_images
+short_description: NetApp E-Series create and delete snapshot images
+description:
+ - Create and delete snapshots images on snapshot groups for NetApp E-series storage arrays.
+ - Only the oldest snapshot image can be deleted so consistency is preserved.
+ - "Related: Snapshot volumes are created from snapshot images."
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+options:
+ ssid:
+ description:
+ - Storage system identifier
+ type: str
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ snapshot_group:
+ description:
+ - The name of the snapshot group in which you want to create a snapshot image.
+ required: True
+ type: str
+ state:
+ description:
+ - Whether a new snapshot image should be created or oldest be deleted.
+ required: True
+ type: str
+ choices: ['create', 'remove']
+"""
+EXAMPLES = """
+ - name: Create Snapshot
+ netapp_e_snapshot_images:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_group: "3300000060080E5000299C24000005B656D9F394"
+ state: 'create'
+"""
+RETURN = """
+---
+ msg:
+ description: State of operation
+ type: str
+ returned: always
+ sample: "Created snapshot image"
+ image_id:
+ description: ID of snapshot image
+ type: str
+ returned: state == created
+ sample: "3400000060080E5000299B640063074057BC5C5E "
+"""
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP(S) request and return ``(status_code, parsed_json)``.

    HTTP error responses are read just like successful ones; any status
    >= 400 raises ``Exception(status, body)`` unless ``ignore_errors`` is set.
    When the response body is empty, the caller's ``data`` argument is echoed
    back unchanged (historical behaviour preserved).
    """
    try:
        response = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                            force=force, last_mod_time=last_mod_time, timeout=timeout,
                            validate_certs=validate_certs, url_username=url_username,
                            url_password=url_password, http_agent=http_agent,
                            force_basic_auth=force_basic_auth)
    except HTTPError as err:
        # The error response still carries a readable body; treat it as the response.
        response = err.fp

    try:
        body = response.read()
        if body:
            data = json.loads(body)
        else:
            body = None
    except Exception:
        if not ignore_errors:
            raise Exception(body)

    status = response.getcode()
    if status >= 400 and not ignore_errors:
        raise Exception(status, data)
    return status, data
+
+
def snapshot_group_from_name(module, ssid, api_url, api_pwd, api_usr, name):
    """Return the snapshot-group record whose label matches ``name``.

    Fails the module when no group with that label exists on the array.
    """
    snap_groups = 'storage-systems/%s/snapshot-groups' % ssid
    snap_groups_url = api_url + snap_groups
    (ret, snapshot_groups) = request(snap_groups_url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
                                     validate_certs=module.params['validate_certs'])

    # Bug fix: the original returned the loop variable *after* the loop, which
    # relied on fail_json() raising to be correct and would hit a NameError if
    # the array returned an empty group listing. Return the match directly.
    for snapshot_group in snapshot_groups:
        if name == snapshot_group['label']:
            return snapshot_group

    module.fail_json(msg="Failed to lookup snapshot group. Group [%s]. Id [%s]." % (name, ssid))
+
+
def oldest_image(module, ssid, api_url, api_pwd, api_usr, name):
    """Return the snapshot image with the lowest sequence number on the array.

    Exits the module when no images exist; fails it when the listing cannot
    be fetched or the chosen image has no ``pitRef``.
    """
    url = api_url + 'storage-systems/%s/snapshot-images' % ssid

    try:
        (ret, images) = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
                                validate_certs=module.params['validate_certs'])
    except Exception as err:
        module.fail_json(msg="Failed to get snapshot images for group. Group [%s]. Id [%s]. Error [%s]" %
                             (name, ssid, to_native(err)))

    if not images:
        module.exit_json(msg="There are no snapshot images to remove. Group [%s]. Id [%s]." % (name, ssid))

    # The lowest pitSequenceNumber identifies the oldest image.
    by_age = sorted(images, key=lambda image: image['pitSequenceNumber'])
    oldest = by_age[0]
    if oldest is None or "pitRef" not in oldest:
        module.fail_json(msg="Failed to lookup oldest snapshot group. Group [%s]. Id [%s]." % (name, ssid))

    return oldest
+
+
def create_image(module, ssid, api_url, pwd, user, p, snapshot_group):
    """Create a new snapshot image inside ``snapshot_group``.

    Returns ``(status, image_id)`` where ``status`` is True when the new image
    reports an 'optimal' status. ``p`` is unused but kept for call
    compatibility with main().
    """
    snapshot_group_obj = snapshot_group_from_name(module, ssid, api_url, pwd, user, snapshot_group)
    snapshot_group_id = snapshot_group_obj['pitGroupRef']
    endpoint = 'storage-systems/%s/snapshot-images' % ssid
    url = api_url + endpoint
    post_data = json.dumps({'groupId': snapshot_group_id})

    # Consistency fix: wrap the POST like the other calls in this module so a
    # transport failure produces a clean module failure instead of a traceback.
    try:
        image_data = request(url, data=post_data, method='POST', url_username=user, url_password=pwd,
                             headers=HEADERS, validate_certs=module.params['validate_certs'])
    except Exception as err:
        module.fail_json(msg="Failed to create snapshot image. Group [%s]. Id [%s]. Error [%s]" %
                             (snapshot_group, ssid, to_native(err)))

    # Bug fix: the original bound the image id to ``id``, shadowing the builtin.
    if image_data[1]['status'] == 'optimal':
        return True, image_data[1]['id']
    return False, ''
+
+
def delete_image(module, ssid, api_url, pwd, user, snapshot_group):
    """Delete the oldest snapshot image from ``snapshot_group``.

    Returns ``(deleted_status, error_message)``.
    """
    image = oldest_image(module, ssid, api_url, pwd, user, snapshot_group)
    image_id = image['pitRef']
    endpoint = 'storage-systems/%s/snapshot-images/%s' % (ssid, image_id)
    url = api_url + endpoint

    try:
        (ret, image_data) = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS,
                                    validate_certs=module.params['validate_certs'])
    except Exception as err:
        # request() raises Exception(status_code, parsed_body) for HTTP errors.
        # Bug fix: the original indexed the exception object itself (e[0]),
        # which is invalid on Python 3, and left ``ret`` unbound so *any*
        # failure crashed with a NameError before it could be reported.
        if len(err.args) == 2:
            ret, image_data = err.args[0], err.args
        else:
            ret, image_data = None, (None, {'errorMessage': to_native(err)})

    if ret == 204:
        deleted_status = True
        error_message = ''
    else:
        deleted_status = False
        try:
            error_message = image_data[1]['errorMessage']
        except (TypeError, IndexError, KeyError):
            # Fall back for payloads that do not follow the (code, body) shape.
            error_message = str(image_data)

    return deleted_status, error_message
+
+
def main():
    """Module entry point: create or remove a snapshot image per ``state``."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        snapshot_group=dict(required=True, type='str'),
        ssid=dict(required=True, type='str'),
        api_url=dict(required=True),
        api_username=dict(required=False),
        api_password=dict(required=False, no_log=True),
        validate_certs=dict(required=False, type='bool', default=True),
        state=dict(required=True, choices=['create', 'remove'], type='str'),
    ))
    module = AnsibleModule(argument_spec)

    p = module.params

    ssid = p.pop('ssid')
    api_url = p.pop('api_url')
    user = p.pop('api_username')
    pwd = p.pop('api_password')
    snapshot_group = p.pop('snapshot_group')
    desired_state = p.pop('state')

    # Normalize the endpoint so relative paths can be appended directly.
    if not api_url.endswith('/'):
        api_url += '/'

    if desired_state == 'create':
        created_status, snapshot_id = create_image(module, ssid, api_url, pwd, user, p, snapshot_group)

        if created_status:
            module.exit_json(changed=True, msg='Created snapshot image', image_id=snapshot_id)
        else:
            module.fail_json(
                msg="Could not create snapshot image on system %s, in snapshot group %s" % (ssid, snapshot_group))
    else:
        deleted, error_msg = delete_image(module, ssid, api_url, pwd, user, snapshot_group)

        if deleted:
            module.exit_json(changed=True, msg='Deleted snapshot image for snapshot group [%s]' % (snapshot_group))
        else:
            # Bug fix: this is the delete path, but the original message
            # claimed the module "Could not create" the image.
            module.fail_json(
                msg="Could not delete snapshot image on system %s, in snapshot group %s --- %s" % (
                    ssid, snapshot_group, error_msg))


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_volume.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_volume.py
new file mode 100644
index 00000000..c8484915
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_volume.py
@@ -0,0 +1,289 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = """
+---
+module: netapp_e_snapshot_volume
+short_description: NetApp E-Series manage snapshot volumes.
+description:
+ - Create, update, remove snapshot volumes for NetApp E/EF-Series storage arrays.
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+notes:
+ - Only I(full_threshold) is supported for update operations. If the snapshot volume already exists and the threshold matches, then an C(ok) status
+ will be returned, no other changes can be made to a pre-existing snapshot volume.
+options:
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ validate_certs:
+ required: false
+ default: true
+ description:
+ - Should https certificates be validated?
+ type: bool
+ ssid:
+ description:
+ - storage array ID
+ type: str
+ required: true
+ snapshot_image_id:
+ required: True
+ type: str
+ description:
+ - The identifier of the snapshot image used to create the new snapshot volume.
+ - "Note: You'll likely want to use the M(netapp_eseries.santricity.netapp_e_facts) module to find the ID of the image you want."
+ full_threshold:
+ description:
+ - The repository utilization warning threshold percentage
+ default: 85
+ type: int
+ name:
+ required: True
+ description:
+ - The name you wish to give the snapshot volume
+ type: str
+ view_mode:
+ required: True
+ type: str
+ description:
+ - The snapshot volume access mode
+ choices: ['readOnly', 'readWrite', 'modeUnknown', '__Undefined']
+ default: 'readOnly'
+ repo_percentage:
+ description:
+ - The size of the view in relation to the size of the base volume
+ default: 20
+ type: int
+ storage_pool_name:
+ description:
+ - Name of the storage pool on which to allocate the repository volume.
+ type: str
+ required: True
+ state:
+ description:
+ - Whether to create or remove the snapshot volume
+ required: True
+ type: str
+ choices:
+ - absent
+ - present
+"""
+EXAMPLES = """
+ - name: Snapshot volume
+ netapp_e_snapshot_volume:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}/"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ state: present
+ storage_pool_name: "{{ snapshot_volume_storage_pool_name }}"
+ snapshot_image_id: "{{ snapshot_volume_image_id }}"
+ name: "{{ snapshot_volume_name }}"
+"""
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: str
+ sample: Json facts for the volume that was created.
+"""
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+import json
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP(S) request and return ``(status_code, parsed_json)``.

    HTTP error responses are read just like successful ones; any status
    >= 400 raises ``Exception(status, body)`` unless ``ignore_errors`` is set.
    When the response body is empty, the caller's ``data`` argument is echoed
    back unchanged (historical behaviour preserved).
    """
    try:
        response = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                            force=force, last_mod_time=last_mod_time, timeout=timeout,
                            validate_certs=validate_certs, url_username=url_username,
                            url_password=url_password, http_agent=http_agent,
                            force_basic_auth=force_basic_auth)
    except HTTPError as err:
        # The error response still carries a readable body; treat it as the response.
        response = err.fp

    try:
        body = response.read()
        if body:
            data = json.loads(body)
        else:
            body = None
    except Exception:
        if not ignore_errors:
            raise Exception(body)

    status = response.getcode()
    if status >= 400 and not ignore_errors:
        raise Exception(status, data)
    return status, data
+
+
class SnapshotVolume(object):
    """Manage an E-Series snapshot volume: create, update threshold, delete."""

    def __init__(self):
        argument_spec = basic_auth_argument_spec()
        argument_spec.update(dict(
            api_username=dict(type='str', required=True),
            api_password=dict(type='str', required=True, no_log=True),
            api_url=dict(type='str', required=True),
            ssid=dict(type='str', required=True),
            snapshot_image_id=dict(type='str', required=True),
            full_threshold=dict(type='int', default=85),
            name=dict(type='str', required=True),
            view_mode=dict(type='str', default='readOnly',
                           choices=['readOnly', 'readWrite', 'modeUnknown', '__Undefined']),
            repo_percentage=dict(type='int', default=20),
            storage_pool_name=dict(type='str', required=True),
            state=dict(type='str', required=True, choices=['absent', 'present'])
        ))

        self.module = AnsibleModule(argument_spec=argument_spec)
        args = self.module.params
        self.state = args['state']
        self.ssid = args['ssid']
        self.snapshot_image_id = args['snapshot_image_id']
        self.full_threshold = args['full_threshold']
        self.name = args['name']
        self.view_mode = args['view_mode']
        self.repo_percentage = args['repo_percentage']
        self.storage_pool_name = args['storage_pool_name']
        self.url = args['api_url']
        self.user = args['api_username']
        self.pwd = args['api_password']
        self.certs = args['validate_certs']

        # Normalize the endpoint so relative paths can be appended directly.
        if not self.url.endswith('/'):
            self.url += '/'

    @property
    def pool_id(self):
        """Return the id of the storage pool named ``storage_pool_name``.

        Caches the full pool record on ``self.pool_data``; fails the module
        when no such pool exists.
        """
        pools = 'storage-systems/%s/storage-pools' % self.ssid
        url = self.url + pools
        (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
                             validate_certs=self.certs)

        for pool in data:
            if pool['name'] == self.storage_pool_name:
                self.pool_data = pool
                return pool['id']

        # Bug fix: the original message reported the snapshot volume name
        # (self.name) instead of the storage pool name that was not found.
        self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.storage_pool_name)

    @property
    def ss_vol_exists(self):
        """Whether a snapshot volume named ``self.name`` exists.

        Caches the matching record on ``self.ss_vol`` when found.
        """
        rc, ss_vols = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid, headers=HEADERS,
                              url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
        # Bug fix: the original returned False as soon as the *first* listed
        # volume failed to match, so an existing snapshot volume later in the
        # listing was never found (and a duplicate would then be created).
        if ss_vols:
            for ss_vol in ss_vols:
                if ss_vol['name'] == self.name:
                    self.ss_vol = ss_vol
                    return True

        return False

    @property
    def ss_vol_needs_update(self):
        """Whether the cached volume's warning threshold differs from the request."""
        if self.ss_vol['fullWarnThreshold'] != self.full_threshold:
            return True
        else:
            return False

    def create_ss_vol(self):
        """Create the snapshot volume, then correct the threshold if ignored."""
        post_data = dict(
            snapshotImageId=self.snapshot_image_id,
            fullThreshold=self.full_threshold,
            name=self.name,
            viewMode=self.view_mode,
            repositoryPercentage=self.repo_percentage,
            repositoryPoolId=self.pool_id
        )

        rc, create_resp = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid,
                                  data=json.dumps(post_data), headers=HEADERS, url_username=self.user,
                                  url_password=self.pwd, validate_certs=self.certs, method='POST')

        self.ss_vol = create_resp
        # Doing a check after creation because the creation call fails to set the specified warning threshold
        if self.ss_vol_needs_update:
            self.update_ss_vol()
        else:
            self.module.exit_json(changed=True, **create_resp)

    def update_ss_vol(self):
        """POST the requested warning threshold to the existing snapshot volume."""
        post_data = dict(
            fullThreshold=self.full_threshold,
        )

        rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']),
                           data=json.dumps(post_data), headers=HEADERS, url_username=self.user, url_password=self.pwd,
                           method='POST', validate_certs=self.certs)

        self.module.exit_json(changed=True, **resp)

    def remove_ss_vol(self):
        """DELETE the cached snapshot volume and exit the module."""
        rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']),
                           headers=HEADERS, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
                           method='DELETE')
        self.module.exit_json(changed=True, msg="Volume successfully deleted")

    def apply(self):
        """Reconcile the requested state with the array and exit the module."""
        if self.state == 'present':
            if self.ss_vol_exists:
                if self.ss_vol_needs_update:
                    self.update_ss_vol()
                else:
                    self.module.exit_json(changed=False, **self.ss_vol)
            else:
                self.create_ss_vol()
        else:
            if self.ss_vol_exists:
                self.remove_ss_vol()
            else:
                self.module.exit_json(changed=False, msg="Volume already absent")
+
+
def main():
    """Module entry point: build the snapshot-volume manager and apply state."""
    SnapshotVolume().apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storage_system.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storage_system.py
new file mode 100644
index 00000000..a0f0d005
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storage_system.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = '''
+module: netapp_e_storage_system
+version_added: "2.2"
+short_description: NetApp E-Series Web Services Proxy manage storage arrays
+description:
+- Manage the arrays accessible via a NetApp Web Services Proxy for NetApp E-series storage arrays.
+options:
+ api_username:
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ required: true
+ api_password:
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ required: true
+ api_url:
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API.
+ required: true
+ type: str
+ validate_certs:
+ description:
+ - Should https certificates be validated?
+ type: bool
+ default: 'yes'
+ ssid:
+ description:
+ - The ID of the array to manage. This value must be unique for each array.
+ type: str
+ required: true
+ state:
+ description:
+ - Whether the specified array should be configured on the Web Services Proxy or not.
+ required: true
+ type: str
+ choices: ['present', 'absent']
+ controller_addresses:
+ description:
+      - The list of addresses for the out-of-band management adapter or the agent host. Mutually exclusive of the array_wwn parameter.
+ type: list
+ required: true
+ array_wwn:
+ description:
+ - The WWN of the array to manage. Only necessary if in-band managing multiple arrays on the same agent host. Mutually exclusive of
+ controller_addresses parameter.
+ type: str
+ array_password:
+ description:
+ - The management password of the array to manage, if set.
+ type: str
+ enable_trace:
+ description:
+ - Enable trace logging for SYMbol calls to the storage system.
+ type: bool
+ default: 'no'
+ meta_tags:
+ description:
+ - Optional meta tags to associate to this storage system
+ type: list
+ array_status_timeout_sec:
+ description:
+ - Array status timeout measured in seconds
+ default: 60
+ type: int
+author: Kevin Hulquest (@hulquest)
+'''
+
+EXAMPLES = '''
+---
+ - name: Presence of storage system
+ netapp_e_storage_system:
+ ssid: "{{ item.key }}"
+ state: present
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ controller_addresses:
+ - "{{ item.value.address1 }}"
+ - "{{ item.value.address2 }}"
+ with_dict: "{{ storage_systems }}"
+ when: check_storage_system
+'''
+
+RETURN = '''
+msg:
+ description: State of request
+ type: str
+ returned: always
+ sample: 'Storage system removed.'
+'''
+import json
+from datetime import datetime as dt, timedelta
+from time import sleep
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP(S) request and return ``(status_code, parsed_json_body)``.

    HTTP error responses (4xx/5xx) are read from the error's file object so the
    proxy's JSON error document can still be returned or raised.

    :param ignore_errors: when true, error status codes and unparsable bodies
        are returned/ignored instead of raising.
    :raise Exception: body cannot be parsed as JSON, or status >= 400, unless
        ignore_errors is set.
    """
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError as err:
        # The error's file object carries the proxy's JSON diagnostics.
        r = err.fp

    # Pre-bind so the except branch below can never raise NameError when
    # r.read() itself fails (previously raw_data could be unbound there).
    raw_data = None
    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
            # NOTE(review): on an empty body `data` keeps the caller's request
            # payload; callers in this module pass data=None in that case.
    except Exception:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data
+
+
def do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_body, timeout):
    """POST a storage-system registration/update and wait for the array to be contacted.

    :param ssid: storage system identifier on the proxy
    :param api_url: base URL of the web services proxy REST API
    :param request_body: body for the POST (JSON-encoded by the caller)
    :param timeout: seconds to wait for the array to leave 'neverContacted'
    :return: tuple of (final status string or None, last response document)
    :raise Exception: when the proxy does not contact the array within `timeout`
    """
    (rc, resp) = request(api_url + "/storage-systems", data=request_body, headers=post_headers,
                         method='POST', url_username=api_usr, url_password=api_pwd,
                         validate_certs=validate_certs)
    status = None
    return_resp = resp
    if 'status' in resp:
        status = resp['status']

    # 201 means the proxy created the entry but has not contacted the array yet;
    # poll once per second until the status changes or the timeout elapses.
    if rc == 201:
        status = 'neverContacted'
        fail_after_time = dt.utcnow() + timedelta(seconds=timeout)

        while status == 'neverContacted':
            if dt.utcnow() > fail_after_time:
                raise Exception("web proxy timed out waiting for array status")

            sleep(1)
            (rc, system_resp) = request(api_url + "/storage-systems/%s" % ssid,
                                        headers=dict(Accept="application/json"), url_username=api_usr,
                                        url_password=api_pwd, validate_certs=validate_certs,
                                        ignore_errors=True)
            # NOTE(review): if the poll returns an error document without a
            # 'status' key this raises KeyError — presumably acceptable; confirm.
            status = system_resp['status']
            return_resp = system_resp

    return status, return_resp
+
+
def main():
    """Register ('present') or remove ('absent') a storage array on the Web Services Proxy."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        ssid=dict(required=True, type='str'),
        controller_addresses=dict(type='list'),
        array_wwn=dict(required=False, type='str'),
        array_password=dict(required=False, type='str', no_log=True),
        array_status_timeout_sec=dict(default=60, type='int'),
        enable_trace=dict(default=False, type='bool'),
        meta_tags=dict(type='list')
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['controller_addresses', 'array_wwn']],
        required_if=[('state', 'present', ['controller_addresses'])]
    )

    p = module.params

    state = p['state']
    ssid = p['ssid']
    controller_addresses = p['controller_addresses']
    array_wwn = p['array_wwn']
    array_password = p['array_password']
    array_status_timeout_sec = p['array_status_timeout_sec']
    validate_certs = p['validate_certs']
    meta_tags = p['meta_tags']
    enable_trace = p['enable_trace']

    api_usr = p['api_username']
    api_pwd = p['api_password']
    api_url = p['api_url']

    changed = False
    array_exists = False

    # Probe the proxy for the array; ignore_errors so a 404 is returned, not raised.
    try:
        (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, headers=dict(Accept="application/json"),
                             url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs,
                             ignore_errors=True)
    except Exception as err:
        module.fail_json(msg="Error accessing storage-system with id [%s]. Error [%s]" % (ssid, to_native(err)))

    # NOTE(review): array_exists is set True before rc is inspected, so any status
    # other than 404 (e.g. 401/500) is treated as "exists" below — confirm intended.
    array_exists = True
    array_detail = resp

    if rc == 200:
        if state == 'absent':
            changed = True
            array_exists = False
        elif state == 'present':
            # Compare desired addresses against the controller IPs the proxy recorded.
            current_addresses = frozenset(i for i in (array_detail['ip1'], array_detail['ip2']) if i)
            if set(controller_addresses) != current_addresses:
                changed = True
            if array_detail['wwn'] != array_wwn and array_wwn is not None:
                module.fail_json(
                    msg='It seems you may have specified a bad WWN. The storage system ID you specified, %s, currently has the WWN of %s' %
                        (ssid, array_detail['wwn'])
                )
    elif rc == 404:
        if state == 'present':
            changed = True
            array_exists = False
        else:
            changed = False
            module.exit_json(changed=changed, msg="Storage system was not present.")

    if changed and not module.check_mode:
        if state == 'present':
            if not array_exists:
                # add the array
                array_add_req = dict(
                    id=ssid,
                    controllerAddresses=controller_addresses,
                    metaTags=meta_tags,
                    enableTrace=enable_trace
                )

                if array_wwn:
                    array_add_req['wwn'] = array_wwn

                if array_password:
                    array_add_req['password'] = array_password

                post_headers = dict(Accept="application/json")
                post_headers['Content-Type'] = 'application/json'
                request_data = json.dumps(array_add_req)

                try:
                    (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_data,
                                         array_status_timeout_sec)
                except Exception as err:
                    module.fail_json(msg="Failed to add storage system. Id[%s]. Request body [%s]. Error[%s]." %
                                     (ssid, request_data, to_native(err)))

            else:  # array exists, modify...
                post_headers = dict(Accept="application/json")
                post_headers['Content-Type'] = 'application/json'
                post_body = dict(
                    controllerAddresses=controller_addresses,
                    removeAllTags=True,
                    enableTrace=enable_trace,
                    metaTags=meta_tags
                )

                try:
                    # FIX: JSON-encode the body like the creation path above; the raw
                    # dict is not a valid request body for open_url/request.
                    (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs,
                                         json.dumps(post_body), array_status_timeout_sec)
                except Exception as err:
                    module.fail_json(msg="Failed to update storage system. Id[%s]. Request body [%s]. Error[%s]." %
                                     (ssid, post_body, to_native(err)))

        elif state == 'absent':
            # delete the array
            try:
                (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, method='DELETE',
                                     url_username=api_usr,
                                     url_password=api_pwd, validate_certs=validate_certs)
            except Exception as err:
                module.fail_json(msg="Failed to remove storage array. Id[%s]. Error[%s]." % (ssid, to_native(err)))

            # 422: the proxy did not have the system registered; 204: removed cleanly.
            # NOTE(review): "presented" in the 422 message reads like a typo for "present".
            if rc == 422:
                module.exit_json(changed=changed, msg="Storage system was not presented.")
            if rc == 204:
                module.exit_json(changed=changed, msg="Storage system removed.")

    module.exit_json(changed=changed, **resp)


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storagepool.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storagepool.py
new file mode 100644
index 00000000..5c74a415
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storagepool.py
@@ -0,0 +1,941 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {"metadata_version": "1.1",
+ "status": ["deprecated"],
+ "supported_by": "community"}
+
+DOCUMENTATION = """
+---
+module: netapp_e_storagepool
+short_description: NetApp E-Series manage volume groups and disk pools
+description: Create or remove volume groups and disk pools for NetApp E-series storage arrays.
+version_added: '2.2'
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ description:
+ - Whether the specified storage pool should exist or not.
+ - Note that removing a storage pool currently requires the removal of all defined volumes first.
+ required: true
+ type: str
+ choices: ["present", "absent"]
+ name:
+ description:
+ - The name of the storage pool to manage
+ type: str
+ required: true
+ criteria_drive_count:
+ description:
+ - The number of disks to use for building the storage pool.
+ - When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified.
+ - The pool will be expanded if this number exceeds the number of disks already in place (See expansion note below)
+ required: false
+ type: int
+ criteria_min_usable_capacity:
+ description:
+ - The minimum size of the storage pool (in size_unit).
+ - When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified.
+ - The pool will be expanded if this value exceeds its current size. (See expansion note below)
+ required: false
+ type: float
+ criteria_drive_type:
+ description:
+ - The type of disk (hdd or ssd) to use when searching for candidates to use.
+ - When not specified each drive type will be evaluated until successful drive candidates are found starting with
+ the most prevalent drive type.
+ required: false
+ type: str
+ choices: ["hdd","ssd"]
+ criteria_size_unit:
+ description:
+ - The unit used to interpret size parameters
+ choices: ["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"]
+ type: str
+ default: "gb"
+ criteria_drive_min_size:
+ description:
+ - The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
+ type: float
+ criteria_drive_interface_type:
+ description:
+ - The interface type to use when selecting drives for the storage pool
+ - If not provided then all interface types will be considered.
+ choices: ["sas", "sas4k", "fibre", "fibre520b", "scsi", "sata", "pata"]
+ type: str
+ required: false
+ criteria_drive_require_da:
+ description:
+ - Ensures the storage pool will be created with only data assurance (DA) capable drives.
+ - Only available for new storage pools; existing storage pools cannot be converted.
+ default: false
+ type: bool
+ version_added: '2.9'
+ criteria_drive_require_fde:
+ description:
+ - Whether full disk encryption ability is required for drives to be added to the storage pool
+ default: false
+ type: bool
+ raid_level:
+ description:
+ - The RAID level of the storage pool to be created.
+ - Required only when I(state=="present").
+ - When I(raid_level=="raidDiskPool") then I(criteria_drive_count >= 10 or criteria_drive_count >= 11) is required
+ depending on the storage array specifications.
+ - When I(raid_level=="raid0") then I(1<=criteria_drive_count) is required.
+ - When I(raid_level=="raid1") then I(2<=criteria_drive_count) is required.
+ - When I(raid_level=="raid3") then I(3<=criteria_drive_count<=30) is required.
+ - When I(raid_level=="raid5") then I(3<=criteria_drive_count<=30) is required.
+ - When I(raid_level=="raid6") then I(5<=criteria_drive_count<=30) is required.
+ - Note that raidAll will be treated as raidDiskPool and raid3 as raid5.
+ required: false
+ choices: ["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"]
+ type: str
+ default: "raidDiskPool"
+ secure_pool:
+ description:
+ - Enables security at rest feature on the storage pool.
+ - Will only work if all drives in the pool are security capable (FDE, FIPS, or mix)
+ - Warning, once security is enabled it is impossible to disable without erasing the drives.
+ required: false
+ type: bool
+ reserve_drive_count:
+ description:
+ - Set the number of drives reserved by the storage pool for reconstruction operations.
+ - Only valid on raid disk pools.
+ type: int
+ required: false
+ remove_volumes:
+ description:
+ - Prior to removing a storage pool, delete all volumes in the pool.
+ default: true
+ type: bool
+ erase_secured_drives:
+ description:
+      - If I(state=="absent") then all storage pool drives will be erased.
+ - If I(state=="present") then delete all available storage array drives that have security enabled.
+ default: true
+ type: bool
+notes:
+ - The expansion operations are non-blocking due to the time consuming nature of expanding volume groups
+ - Traditional volume groups (raid0, raid1, raid5, raid6) are performed in steps dictated by the storage array. Each
+ required step will be attempted until the request fails which is likely because of the required expansion time.
+ - raidUnsupported will be treated as raid0, raidAll as raidDiskPool and raid3 as raid5.
+ - Tray loss protection and drawer loss protection will be chosen if at all possible.
+"""
+EXAMPLES = """
+- name: No disk groups
+ netapp_e_storagepool:
+ ssid: "{{ ssid }}"
+ name: "{{ item }}"
+ state: absent
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+"""
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: str
+ sample: Json facts for the pool that was created.
+"""
+import functools
+from itertools import groupby
+from time import sleep
+from pprint import pformat
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
def get_most_common_elements(iterator):
    """Return a list of (element, count) pairs ordered by descending count.

    Elements with equal counts keep ascending element order. A TypeError is
    raised when the argument is not a list.
    """
    if not isinstance(iterator, list):
        raise TypeError("iterator must be a list.")

    # Sorting first makes groupby yield each distinct element exactly once,
    # in ascending order; count each run's length.
    tallies = [(element, sum(1 for _ in run)) for element, run in groupby(sorted(iterator))]
    # list.sort is stable, so tied counts keep the ascending-element order.
    tallies.sort(key=lambda pair: pair[1], reverse=True)
    return tallies
+
+
def memoize(func):
    """Generic memoizer for any function with any number of arguments including zero.

    NOTE(review): a fresh ``MemoizeFuncArgs`` mapping is constructed on every
    invocation of ``wrapper``, so ``__missing__`` always fires and ``func`` is
    re-executed each call; no result survives across calls. In practice this
    decorator is a transparent pass-through — confirm before relying on caching.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        class MemoizeFuncArgs(dict):
            def __missing__(self, _key):
                self[_key] = func(*args, **kwargs)
                return self[_key]

        # "no_argument_response" is used unless BOTH positional and keyword
        # arguments are present (note: `and`, not `or`).
        key = str((args, kwargs)) if args and kwargs else "no_argument_response"
        return MemoizeFuncArgs().__getitem__(key)

    return wrapper
+
+
+class NetAppESeriesStoragePool(NetAppESeriesModule):
+ EXPANSION_TIMEOUT_SEC = 10
+ DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11
+
    def __init__(self):
        """Declare the module's argument spec and normalize the user-supplied criteria.

        All capacity criteria are converted to bytes, and documented raid-level
        aliases (raidAll, raid3) are mapped to the values the API accepts.
        """
        # Minimum SANtricity Web Services version required by this module.
        version = "02.00.0000.0000"
        ansible_options = dict(
            state=dict(required=True, choices=["present", "absent"], type="str"),
            name=dict(required=True, type="str"),
            criteria_size_unit=dict(choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"],
                                    default="gb", type="str"),
            criteria_drive_count=dict(type="int"),
            criteria_drive_interface_type=dict(choices=["sas", "sas4k", "fibre", "fibre520b", "scsi", "sata", "pata"],
                                               type="str"),
            criteria_drive_type=dict(choices=["ssd", "hdd"], type="str", required=False),
            criteria_drive_min_size=dict(type="float"),
            criteria_drive_require_da=dict(type="bool", required=False),
            criteria_drive_require_fde=dict(type="bool", required=False),
            criteria_min_usable_capacity=dict(type="float"),
            raid_level=dict(choices=["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"],
                            default="raidDiskPool"),
            erase_secured_drives=dict(type="bool", default=True),
            secure_pool=dict(type="bool", default=False),
            reserve_drive_count=dict(type="int"),
            remove_volumes=dict(type="bool", default=True))

        required_if = [["state", "present", ["raid_level"]]]
        super(NetAppESeriesStoragePool, self).__init__(ansible_options=ansible_options,
                                                       web_services_version=version,
                                                       supports_check_mode=True,
                                                       required_if=required_if)

        args = self.module.params
        self.state = args["state"]
        self.ssid = args["ssid"]
        self.name = args["name"]
        self.criteria_drive_count = args["criteria_drive_count"]
        self.criteria_min_usable_capacity = args["criteria_min_usable_capacity"]
        self.criteria_size_unit = args["criteria_size_unit"]
        self.criteria_drive_min_size = args["criteria_drive_min_size"]
        self.criteria_drive_type = args["criteria_drive_type"]
        self.criteria_drive_interface_type = args["criteria_drive_interface_type"]
        self.criteria_drive_require_fde = args["criteria_drive_require_fde"]
        self.criteria_drive_require_da = args["criteria_drive_require_da"]
        self.raid_level = args["raid_level"]
        self.erase_secured_drives = args["erase_secured_drives"]
        self.secure_pool = args["secure_pool"]
        self.reserve_drive_count = args["reserve_drive_count"]
        self.remove_volumes = args["remove_volumes"]
        # Populated later with the current pool document (dict) or left None.
        self.pool_detail = None

        # Change all sizes to be measured in bytes
        if self.criteria_min_usable_capacity:
            self.criteria_min_usable_capacity = int(self.criteria_min_usable_capacity *
                                                    self.SIZE_UNIT_MAP[self.criteria_size_unit])
        if self.criteria_drive_min_size:
            self.criteria_drive_min_size = int(self.criteria_drive_min_size *
                                               self.SIZE_UNIT_MAP[self.criteria_size_unit])
        self.criteria_size_unit = "bytes"

        # Adjust unused raid level option to reflect documentation
        if self.raid_level == "raidAll":
            self.raid_level = "raidDiskPool"
        if self.raid_level == "raid3":
            self.raid_level = "raid5"
+
+ @property
+ @memoize
+ def available_drives(self):
+ """Determine the list of available drives"""
+ return [drive["id"] for drive in self.drives if drive["available"] and drive["status"] == "optimal"]
+
+ @property
+ @memoize
+ def available_drive_types(self):
+ """Determine the types of available drives sorted by the most common first."""
+ types = [drive["driveMediaType"] for drive in self.drives]
+ return [entry[0] for entry in get_most_common_elements(types)]
+
+ @property
+ @memoize
+ def available_drive_interface_types(self):
+ """Determine the types of available drives."""
+ interfaces = [drive["phyDriveType"] for drive in self.drives]
+ return [entry[0] for entry in get_most_common_elements(interfaces)]
+
+ @property
+ def storage_pool_drives(self):
+ """Retrieve list of drives found in storage pool."""
+ return [drive for drive in self.drives if drive["currentVolumeGroupRef"] == self.pool_detail["id"] and not drive["hotSpare"]]
+
+ @property
+ def expandable_drive_count(self):
+ """Maximum number of drives that a storage pool can be expanded at a given time."""
+ capabilities = None
+ if self.raid_level == "raidDiskPool":
+ return len(self.available_drives)
+
+ try:
+ rc, capabilities = self.request("storage-systems/%s/capabilities" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to fetch maximum expandable drive count. Array id [%s]. Error[%s]."
+ % (self.ssid, to_native(error)))
+
+ return capabilities["featureParameters"]["maxDCEDrives"]
+
+ @property
+ def disk_pool_drive_minimum(self):
+ """Provide the storage array's minimum disk pool drive count."""
+ rc, attr = self.request("storage-systems/%s/symbol/getSystemAttributeDefaults" % self.ssid, ignore_errors=True)
+
+ # Standard minimum is 11 drives but some allow 10 drives. 10 will be the default
+ if (rc != 200 or "minimumDriveCount" not in attr["defaults"]["diskPoolDefaultAttributes"].keys() or
+ attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"] == 0):
+ return self.DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT
+
+ return attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"]
+
+ def get_available_drive_capacities(self, drive_id_list=None):
+ """Determine the list of available drive capacities."""
+ if drive_id_list:
+ available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
+ if drive["id"] in drive_id_list and drive["available"] and
+ drive["status"] == "optimal"])
+ else:
+ available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
+ if drive["available"] and drive["status"] == "optimal"])
+
+ self.module.log("available drive capacities: %s" % available_drive_capacities)
+ return list(available_drive_capacities)
+
+ @property
+ def drives(self):
+ """Retrieve list of drives found in storage pool."""
+ drives = None
+ try:
+ rc, drives = self.request("storage-systems/%s/drives" % self.ssid)
+ except Exception as error:
+ self.module.fail_json(msg="Failed to fetch disk drives. Array id [%s]. Error[%s]."
+ % (self.ssid, to_native(error)))
+
+ return drives
+
+ def is_drive_count_valid(self, drive_count):
+ """Validate drive count criteria is met."""
+ if self.criteria_drive_count and drive_count < self.criteria_drive_count:
+ return False
+
+ if self.raid_level == "raidDiskPool":
+ return drive_count >= self.disk_pool_drive_minimum
+ if self.raid_level == "raid0":
+ return drive_count > 0
+ if self.raid_level == "raid1":
+ return drive_count >= 2 and (drive_count % 2) == 0
+ if self.raid_level in ["raid3", "raid5"]:
+ return 3 <= drive_count <= 30
+ if self.raid_level == "raid6":
+ return 5 <= drive_count <= 30
+ return False
+
+ @property
+ def storage_pool(self):
+ """Retrieve storage pool information."""
+ storage_pools_resp = None
+ try:
+ rc, storage_pools_resp = self.request("storage-systems/%s/storage-pools" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]."
+ % (self.ssid, to_native(err), self.state))
+
+ pool_detail = [pool for pool in storage_pools_resp if pool["name"] == self.name]
+ return pool_detail[0] if pool_detail else dict()
+
+ @property
+ def storage_pool_volumes(self):
+ """Retrieve list of volumes associated with storage pool."""
+ volumes_resp = None
+ try:
+ rc, volumes_resp = self.request("storage-systems/%s/volumes" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]."
+ % (self.ssid, to_native(err), self.state))
+
+ group_ref = self.storage_pool["volumeGroupRef"]
+ storage_pool_volume_list = [volume["id"] for volume in volumes_resp if volume["volumeGroupRef"] == group_ref]
+ return storage_pool_volume_list
+
    def get_ddp_capacity(self, expansion_drive_list):
        """Return the total usable capacity (bytes) of the dynamic disk pool after
        adding the drives in expansion_drive_list.

        The arithmetic mirrors the array firmware's DDP sizing model (extent
        counts, reconstruction overhead, reserved drives).
        """

        def get_ddp_error_percent(_drive_count, _extent_count):
            """Determine the space reserved for reconstruction"""
            if _drive_count <= 36:
                if _extent_count <= 600:
                    return 0.40
                elif _extent_count <= 1400:
                    return 0.35
                elif _extent_count <= 6200:
                    return 0.20
                elif _extent_count <= 50000:
                    return 0.15
            elif _drive_count <= 64:
                if _extent_count <= 600:
                    return 0.20
                elif _extent_count <= 1400:
                    return 0.15
                elif _extent_count <= 6200:
                    return 0.10
                elif _extent_count <= 50000:
                    return 0.05
            elif _drive_count <= 480:
                if _extent_count <= 600:
                    return 0.20
                elif _extent_count <= 1400:
                    return 0.15
                elif _extent_count <= 6200:
                    return 0.10
                elif _extent_count <= 50000:
                    return 0.05

            # No table entry matched: the configuration is outside supported bounds.
            self.module.fail_json(msg="Drive count exceeded the error percent table. Array[%s]" % self.ssid)

        def get_ddp_reserved_drive_count(_disk_count):
            """Determine the number of reserved drive."""
            reserve_count = 0

            # An explicit user-specified reserve count overrides the size-based table.
            if self.reserve_drive_count:
                reserve_count = self.reserve_drive_count
            elif _disk_count >= 256:
                reserve_count = 8
            elif _disk_count >= 192:
                reserve_count = 7
            elif _disk_count >= 128:
                reserve_count = 6
            elif _disk_count >= 64:
                reserve_count = 4
            elif _disk_count >= 32:
                reserve_count = 3
            elif _disk_count >= 12:
                reserve_count = 2
            elif _disk_count == 11:
                reserve_count = 1

            return reserve_count

        # Existing pool: expansion adds to the current member drives.
        if self.pool_detail:
            drive_count = len(self.storage_pool_drives) + len(expansion_drive_list)
        else:
            drive_count = len(expansion_drive_list)

        # Capacity is bounded by the smallest participating drive.
        drive_usable_capacity = min(min(self.get_available_drive_capacities()),
                                    min(self.get_available_drive_capacities(expansion_drive_list)))
        # Magic numbers: 8053063680 bytes (~7.5 GiB) looks like per-drive overhead
        # and 536870912 bytes (512 MiB) the extent size — confirm against DDP spec.
        drive_data_extents = ((drive_usable_capacity - 8053063680) / 536870912)
        maximum_stripe_count = (drive_count * drive_data_extents) / 10

        error_percent = get_ddp_error_percent(drive_count, drive_data_extents)
        error_overhead = (drive_count * drive_data_extents / 10 * error_percent + 10) / 10

        total_stripe_count = maximum_stripe_count - error_overhead
        stripe_count_per_drive = total_stripe_count / drive_count
        reserved_stripe_count = get_ddp_reserved_drive_count(drive_count) * stripe_count_per_drive
        available_stripe_count = total_stripe_count - reserved_stripe_count

        # 4294967296 bytes = 4 GiB per stripe.
        return available_stripe_count * 4294967296
+
    @memoize
    def get_candidate_drives(self):
        """Retrieve set of drives candidates for creating a new storage pool.

        Queries the symbol getVolumeCandidates endpoint for every applicable
        (interface type, media type) combination, orders results by loss
        protection (tray+drawer first), then returns the first candidate that
        meets the user's drive count / capacity / minimum-size criteria.
        Fails the module when no candidate qualifies.
        """

        def get_candidate_drive_request():
            """Perform request for new volume creation."""
            candidates_list = list()
            # When a criterion was not given, try all observed types, most common first.
            drive_types = [self.criteria_drive_type] if self.criteria_drive_type else self.available_drive_types
            interface_types = [self.criteria_drive_interface_type] \
                if self.criteria_drive_interface_type else self.available_drive_interface_types

            for interface_type in interface_types:
                for drive_type in drive_types:
                    candidates = None
                    volume_candidate_request_data = dict(
                        type="diskPool" if self.raid_level == "raidDiskPool" else "traditional",
                        diskPoolVolumeCandidateRequestData=dict(
                            reconstructionReservedDriveCount=65535))
                    candidate_selection_type = dict(
                        candidateSelectionType="count",
                        driveRefList=dict(driveRef=self.available_drives))
                    criteria = dict(raidLevel=self.raid_level,
                                    phyDriveType=interface_type,
                                    dssPreallocEnabled=False,
                                    securityType="capable" if self.criteria_drive_require_fde else "none",
                                    driveMediaType=drive_type,
                                    onlyProtectionInformationCapable=True if self.criteria_drive_require_da else False,
                                    volumeCandidateRequestData=volume_candidate_request_data,
                                    allocateReserveSpace=False,
                                    securityLevel="fde" if self.criteria_drive_require_fde else "none",
                                    candidateSelectionType=candidate_selection_type)

                    try:
                        rc, candidates = self.request("storage-systems/%s/symbol/getVolumeCandidates?verboseError"
                                                      "Response=true" % self.ssid, data=criteria, method="POST")
                    except Exception as error:
                        self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]."
                                              % (self.ssid, to_native(error)))

                    if candidates:
                        candidates_list.extend(candidates["volumeCandidate"])

            # Sort output based on tray and then drawer protection first
            tray_drawer_protection = list()
            tray_protection = list()
            drawer_protection = list()
            no_protection = list()
            sorted_candidates = list()
            for item in candidates_list:
                if item["trayLossProtection"]:
                    if item["drawerLossProtection"]:
                        tray_drawer_protection.append(item)
                    else:
                        tray_protection.append(item)
                elif item["drawerLossProtection"]:
                    drawer_protection.append(item)
                else:
                    no_protection.append(item)

            if tray_drawer_protection:
                sorted_candidates.extend(tray_drawer_protection)
            if tray_protection:
                sorted_candidates.extend(tray_protection)
            if drawer_protection:
                sorted_candidates.extend(drawer_protection)
            if no_protection:
                sorted_candidates.extend(no_protection)

            return sorted_candidates

        # Determine the appropriate candidate list
        for candidate in get_candidate_drive_request():

            # Evaluate candidates for required drive count, collective drive usable capacity and minimum drive size
            if self.criteria_drive_count:
                if self.criteria_drive_count != int(candidate["driveCount"]):
                    continue
            if self.criteria_min_usable_capacity:
                if ((self.raid_level == "raidDiskPool" and self.criteria_min_usable_capacity >
                     self.get_ddp_capacity(candidate["driveRefList"]["driveRef"])) or
                        self.criteria_min_usable_capacity > int(candidate["usableSize"])):
                    continue
            if self.criteria_drive_min_size:
                if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["driveRefList"]["driveRef"])):
                    continue

            return candidate

        self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid)
+
    @memoize
    def get_expansion_candidate_drives(self):
        """Retrieve required expansion drive list.

        Note: To satisfy the expansion criteria each item in the candidate list must be added to the specified
        group since there is a potential limitation on how many drives can be incorporated at a time.
            * Traditional raid volume groups must be added two drives maximum at a time. No limits on raid disk pools.

        :return list(candidate): list of candidate structures from the getVolumeGroupExpansionCandidates symbol endpoint
        """

        def get_expansion_candidate_drive_request():
            """Perform the request for expanding existing volume groups or disk pools.

            Note: the list of candidate structures do not necessarily produce candidates that meet all criteria.
            """
            candidates_list = None
            # Disk pools use a different expansion-candidate endpoint than volume groups.
            url = "storage-systems/%s/symbol/getVolumeGroupExpansionCandidates?verboseErrorResponse=true" % self.ssid
            if self.raid_level == "raidDiskPool":
                url = "storage-systems/%s/symbol/getDiskPoolExpansionCandidates?verboseErrorResponse=true" % self.ssid

            try:
                rc, candidates_list = self.request(url, method="POST", data=self.pool_detail["id"])
            except Exception as error:
                self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]."
                                      % (self.ssid, to_native(error)))

            return candidates_list["candidates"]

        required_candidate_list = list()
        required_additional_drives = 0
        required_additional_capacity = 0
        total_required_capacity = 0

        # determine whether and how much expansion is needed to satisfy the specified criteria
        if self.criteria_min_usable_capacity:
            total_required_capacity = self.criteria_min_usable_capacity
            required_additional_capacity = self.criteria_min_usable_capacity - int(self.pool_detail["totalRaidedSpace"])

        if self.criteria_drive_count:
            required_additional_drives = self.criteria_drive_count - len(self.storage_pool_drives)

        # Determine the appropriate expansion candidate list
        if required_additional_drives > 0 or required_additional_capacity > 0:
            for candidate in get_expansion_candidate_drive_request():

                if self.criteria_drive_min_size:
                    if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["drives"])):
                        continue

                if self.raid_level == "raidDiskPool":
                    # Disk pools: a single candidate that meets both count and
                    # capacity requirements is sufficient.
                    if (len(candidate["drives"]) >= required_additional_drives and
                            self.get_ddp_capacity(candidate["drives"]) >= total_required_capacity):
                        required_candidate_list.append(candidate)
                        break
                else:
                    # Volume groups: accumulate candidates until the deficits are covered.
                    required_additional_drives -= len(candidate["drives"])
                    required_additional_capacity -= int(candidate["usableCapacity"])
                    required_candidate_list.append(candidate)

                # Determine if required drives and capacities are satisfied
                if required_additional_drives <= 0 and required_additional_capacity <= 0:
                    break
            else:
                # for/else: the loop exhausted all candidates without breaking,
                # meaning the criteria could not be met.
                self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid)

        return required_candidate_list
+
+ def get_reserve_drive_count(self):
+ """Retrieve the current number of reserve drives for raidDiskPool (Only for raidDiskPool)."""
+
+ if not self.pool_detail:
+ self.module.fail_json(msg="The storage pool must exist. Array [%s]." % self.ssid)
+
+ if self.raid_level != "raidDiskPool":
+ self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]."
+ % (self.pool_detail["id"], self.ssid))
+
+ return self.pool_detail["volumeGroupData"]["diskPoolData"]["reconstructionReservedDriveCount"]
+
    def get_maximum_reserve_drive_count(self):
        """Retrieve the maximum number of reserve drives for storage pool (Only for raidDiskPool).

        The drive count considered is the existing pool drives plus all drives in the pending
        expansion candidates, or -- for a pool that does not yet exist -- the creation candidate.
        """
        if self.raid_level != "raidDiskPool":
            self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]."
                                      % (self.pool_detail["id"], self.ssid))

        drives_ids = list()

        if self.pool_detail:
            drives_ids.extend(self.storage_pool_drives)
            for candidate in self.get_expansion_candidate_drives():
                drives_ids.extend((candidate["drives"]))
        else:
            candidate = self.get_candidate_drives()
            drives_ids.extend(candidate["driveRefList"]["driveRef"])

        # Reserve count scales at roughly 20% of the drive count plus one, bounded below by the
        # number of drives beyond ten, and capped at an absolute maximum of ten reserve drives.
        # NOTE(review): for pools of ten or fewer drives this yields zero or a negative value --
        # presumably callers only compare against it; confirm before relying on the raw number.
        drive_count = len(drives_ids)
        maximum_reserve_drive_count = min(int(drive_count * 0.2 + 1), drive_count - 10)
        if maximum_reserve_drive_count > 10:
            maximum_reserve_drive_count = 10

        return maximum_reserve_drive_count
+
+ def set_reserve_drive_count(self, check_mode=False):
+ """Set the reserve drive count for raidDiskPool."""
+ changed = False
+
+ if self.raid_level == "raidDiskPool" and self.reserve_drive_count:
+ maximum_count = self.get_maximum_reserve_drive_count()
+
+ if self.reserve_drive_count < 0 or self.reserve_drive_count > maximum_count:
+ self.module.fail_json(msg="Supplied reserve drive count is invalid or exceeds the maximum allowed. "
+ "Note that it may be necessary to wait for expansion operations to complete "
+ "before the adjusting the reserve drive count. Maximum [%s]. Array [%s]."
+ % (maximum_count, self.ssid))
+
+ if self.reserve_drive_count != self.get_reserve_drive_count():
+ changed = True
+
+ if not check_mode:
+ try:
+ rc, resp = self.request("storage-systems/%s/symbol/setDiskPoolReservedDriveCount" % self.ssid,
+ method="POST", data=dict(volumeGroupRef=self.pool_detail["id"],
+ newDriveCount=self.reserve_drive_count))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to set reserve drive count for disk pool. Disk Pool [%s]."
+ " Array [%s]." % (self.pool_detail["id"], self.ssid))
+
+ return changed
+
+ def erase_all_available_secured_drives(self, check_mode=False):
+ """Erase all available drives that have encryption at rest feature enabled."""
+ changed = False
+ drives_list = list()
+ for drive in self.drives:
+ if drive["available"] and drive["fdeEnabled"]:
+ changed = True
+ drives_list.append(drive["id"])
+
+ if drives_list and not check_mode:
+ try:
+ rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true"
+ % self.ssid, method="POST", data=dict(driveRef=drives_list))
+ except Exception as error:
+ self.module.fail_json(msg="Failed to erase all secured drives. Array [%s]" % self.ssid)
+
+ return changed
+
    def create_storage_pool(self):
        """Create new storage pool.

        Issues the symbol createVolumeGroup request (or createDiskPool for raidDiskPool) using the
        candidate drive set selected by get_candidate_drives, then refreshes self.pool_detail.
        """
        url = "storage-systems/%s/symbol/createVolumeGroup?verboseErrorResponse=true" % self.ssid
        request_body = dict(label=self.name,
                            candidate=self.get_candidate_drives())

        if self.raid_level == "raidDiskPool":
            url = "storage-systems/%s/symbol/createDiskPool?verboseErrorResponse=true" % self.ssid

            # Disk pools take additional creation parameters; priorities use the array defaults and
            # the utilization thresholds are passed as fixed values (warning 0, critical 65535).
            request_body.update(
                dict(backgroundOperationPriority="useDefault",
                     criticalReconstructPriority="useDefault",
                     degradedReconstructPriority="useDefault",
                     poolUtilizationCriticalThreshold=65535,
                     poolUtilizationWarningThreshold=0))

            # Optional explicit reserve (reconstruction) drive count for the new disk pool.
            if self.reserve_drive_count:
                request_body.update(dict(volumeCandidateData=dict(
                    diskPoolVolumeCandidateData=dict(reconstructionReservedDriveCount=self.reserve_drive_count))))

        try:
            rc, resp = self.request(url, method="POST", data=request_body)
        except Exception as error:
            self.module.fail_json(msg="Failed to create storage pool. Array id [%s]. Error[%s]."
                                      % (self.ssid, to_native(error)))

        # Update drive and storage pool information
        self.pool_detail = self.storage_pool
+
    def delete_storage_pool(self):
        """Delete storage pool.

        Optionally deletes contained volumes (remove_volumes) and, when erase_secured_drives is
        set, reprovisions the pool's FDE-enabled member drives afterwards.
        """
        # Capture the secured (FDE-enabled) member drives before the pool is removed.
        storage_pool_drives = [drive["id"] for drive in self.storage_pool_drives if drive["fdeEnabled"]]
        try:
            delete_volumes_parameter = "?delete-volumes=true" if self.remove_volumes else ""
            rc, resp = self.request("storage-systems/%s/storage-pools/%s%s"
                                    % (self.ssid, self.pool_detail["id"], delete_volumes_parameter), method="DELETE")
        except Exception as error:
            self.module.fail_json(msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]."
                                      % (self.pool_detail["id"], self.ssid, to_native(error)))

        if storage_pool_drives and self.erase_secured_drives:
            try:
                rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true"
                                        % self.ssid, method="POST", data=dict(driveRef=storage_pool_drives))
            except Exception as error:
                self.module.fail_json(msg="Failed to erase drives prior to creating new storage pool. Array [%s]."
                                          " Error [%s]." % (self.ssid, to_native(error)))
+
+ def secure_storage_pool(self, check_mode=False):
+ """Enable security on an existing storage pool"""
+ self.pool_detail = self.storage_pool
+ needs_secure_pool = False
+
+ if not self.secure_pool and self.pool_detail["securityType"] == "enabled":
+ self.module.fail_json(msg="It is not possible to disable storage pool security! See array documentation.")
+ if self.secure_pool and self.pool_detail["securityType"] != "enabled":
+ needs_secure_pool = True
+
+ if needs_secure_pool and not check_mode:
+ try:
+ rc, resp = self.request("storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail["id"]),
+ data=dict(securePool=True), method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to secure storage pool. Pool id [%s]. Array [%s]. Error"
+ " [%s]." % (self.pool_detail["id"], self.ssid, to_native(error)))
+
+ self.pool_detail = self.storage_pool
+ return needs_secure_pool
+
+ def migrate_raid_level(self, check_mode=False):
+ """Request storage pool raid level migration."""
+ needs_migration = self.raid_level != self.pool_detail["raidLevel"]
+ if needs_migration and self.pool_detail["raidLevel"] == "raidDiskPool":
+ self.module.fail_json(msg="Raid level cannot be changed for disk pools")
+
+ if needs_migration and not check_mode:
+ sp_raid_migrate_req = dict(raidLevel=self.raid_level)
+
+ try:
+ rc, resp = self.request("storage-systems/%s/storage-pools/%s/raid-type-migration"
+ % (self.ssid, self.name), data=sp_raid_migrate_req, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to change the raid level of storage pool. Array id [%s]."
+ " Error[%s]." % (self.ssid, to_native(error)))
+
+ self.pool_detail = self.storage_pool
+ return needs_migration
+
    def expand_storage_pool(self, check_mode=False):
        """Add drives to existing storage pool.

        :param bool check_mode: when True only report whether an expansion would be required.
        :return tuple(bool, float): whether drives were required to be added to satisfy the specified
            criteria, and the estimated completion time reported by the last observed expansion action."""
        expansion_candidate_list = self.get_expansion_candidate_drives()
        changed_required = bool(expansion_candidate_list)
        estimated_completion_time = 0.0

        # Build expansion request groupings; each request may contain at most
        # self.expandable_drive_count drives for traditional raid volume groups.
        required_expansion_candidate_list = list()
        while expansion_candidate_list:
            subset = list()
            while expansion_candidate_list and len(subset) < self.expandable_drive_count:
                subset.extend(expansion_candidate_list.pop()["drives"])
            required_expansion_candidate_list.append(subset)

        if required_expansion_candidate_list and not check_mode:
            url = "storage-systems/%s/symbol/startVolumeGroupExpansion?verboseErrorResponse=true" % self.ssid
            if self.raid_level == "raidDiskPool":
                url = "storage-systems/%s/symbol/startDiskPoolExpansion?verboseErrorResponse=true" % self.ssid

            while required_expansion_candidate_list:
                candidate_drives_list = required_expansion_candidate_list.pop()
                request_body = dict(volumeGroupRef=self.pool_detail["volumeGroupRef"],
                                    driveRef=candidate_drives_list)
                try:
                    rc, resp = self.request(url, method="POST", data=request_body)
                except Exception as error:
                    # Surface any in-flight pool actions, since they commonly block expansion requests.
                    rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress"
                                                    % (self.ssid, self.pool_detail["id"]), ignore_errors=True)
                    if rc == 200 and actions_resp:
                        actions = [action["currentAction"] for action in actions_resp
                                   if action["volumeRef"] in self.storage_pool_volumes]
                        self.module.fail_json(msg="Failed to add drives to the storage pool possibly because of actions"
                                                  " in progress. Actions [%s]. Pool id [%s]. Array id [%s]. Error[%s]."
                                                  % (", ".join(actions), self.pool_detail["id"], self.ssid,
                                                     to_native(error)))

                    self.module.fail_json(msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]."
                                              " Error[%s]." % (self.pool_detail["id"], self.ssid, to_native(error)))

                # Wait for expansion completion unless it is the last request in the candidate list
                if required_expansion_candidate_list:
                    for dummy in range(self.EXPANSION_TIMEOUT_SEC):
                        rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress"
                                                        % (self.ssid, self.pool_detail["id"]), ignore_errors=True)
                        if rc == 200:
                            for action in actions_resp:
                                # "remappingDce" indicates the previous expansion is still running; poll again.
                                if (action["volumeRef"] in self.storage_pool_volumes and
                                        action["currentAction"] == "remappingDce"):
                                    sleep(1)
                                    estimated_completion_time = action["estimatedTimeToCompletion"]
                                    break
                            else:
                                # for-else: no expansion action remains in progress -- done waiting.
                                estimated_completion_time = 0.0
                                break

        return changed_required, estimated_completion_time
+
    def apply(self):
        """Apply requested state to storage array.

        Validates the selection criteria, determines which changes are required (expansion, raid
        level migration, security, reserve drive count, creation or deletion) and applies them
        unless running in check mode. Exits the module via exit_json with a summary message and
        the resulting pool details.
        """
        changed = False

        if self.state == "present":
            # Creation/expansion requires at least one sizing criterion.
            if self.criteria_drive_count is None and self.criteria_min_usable_capacity is None:
                self.module.fail_json(msg="One of criteria_min_usable_capacity or criteria_drive_count must be"
                                          " specified.")
            if self.criteria_drive_count and not self.is_drive_count_valid(self.criteria_drive_count):
                self.module.fail_json(msg="criteria_drive_count must be valid for the specified raid level.")

        self.pool_detail = self.storage_pool
        self.module.log(pformat(self.pool_detail))

        if self.state == "present" and self.erase_secured_drives:
            self.erase_all_available_secured_drives(check_mode=True)

        # Determine whether changes need to be applied to the storage array
        if self.pool_detail:

            if self.state == "absent":
                changed = True

            elif self.state == "present":

                # Shrinking the pool and altering media type or DA capability are unsupported.
                if self.criteria_drive_count and self.criteria_drive_count < len(self.storage_pool_drives):
                    self.module.fail_json(msg="Failed to reduce the size of the storage pool. Array [%s]. Pool [%s]."
                                              % (self.ssid, self.pool_detail["id"]))

                if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail["driveMediaType"]:
                    self.module.fail_json(msg="Failed! It is not possible to modify storage pool media type."
                                              " Array [%s]. Pool [%s]." % (self.ssid, self.pool_detail["id"]))

                if (self.criteria_drive_require_da is not None and self.criteria_drive_require_da !=
                        self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"]):
                    self.module.fail_json(msg="Failed! It is not possible to modify DA-capability. Array [%s]."
                                              " Pool [%s]." % (self.ssid, self.pool_detail["id"]))

                # Evaluate current storage pool for required change.
                needs_expansion, estimated_completion_time = self.expand_storage_pool(check_mode=True)
                if needs_expansion:
                    changed = True
                if self.migrate_raid_level(check_mode=True):
                    changed = True
                if self.secure_storage_pool(check_mode=True):
                    changed = True
                if self.set_reserve_drive_count(check_mode=True):
                    changed = True

        elif self.state == "present":
            changed = True

        # Apply changes to storage array.  The %s placeholder in msg is filled with self.name at exit.
        msg = "No changes were required for the storage pool [%s]."
        if changed and not self.module.check_mode:
            if self.state == "present":
                if self.erase_secured_drives:
                    self.erase_all_available_secured_drives()

                if self.pool_detail:
                    change_list = list()

                    # Expansion needs to occur before raid level migration to account for any sizing needs.
                    expanded, estimated_completion_time = self.expand_storage_pool()
                    if expanded:
                        change_list.append("expanded")
                    if self.migrate_raid_level():
                        change_list.append("raid migration")
                    if self.secure_storage_pool():
                        change_list.append("secured")
                    if self.set_reserve_drive_count():
                        change_list.append("adjusted reserve drive count")

                    if change_list:
                        msg = "Following changes have been applied to the storage pool [%s]: " + ", ".join(change_list)

                    if expanded:
                        msg += "\nThe expansion operation will complete in an estimated %s minutes."\
                               % estimated_completion_time
                else:
                    self.create_storage_pool()
                    msg = "Storage pool [%s] was created."

                    # Newly created pools may still need securing and reserve drive adjustment.
                    if self.secure_storage_pool():
                        msg = "Storage pool [%s] was created and secured."
                    if self.set_reserve_drive_count():
                        msg += " Adjusted reserve drive count."

            elif self.pool_detail:
                self.delete_storage_pool()
                msg = "Storage pool [%s] removed."

        self.pool_detail = self.storage_pool
        self.module.log(pformat(self.pool_detail))
        self.module.log(msg % self.name)
        self.module.exit_json(msg=msg % self.name, changed=changed, **self.pool_detail)
+
+
def main():
    """Entry point: instantiate the storage pool object and apply the requested state."""
    pool = NetAppESeriesStoragePool()
    pool.apply()


if __name__ == "__main__":
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_syslog.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_syslog.py
new file mode 100644
index 00000000..1e6e8588
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_syslog.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_syslog
+short_description: NetApp E-Series manage syslog settings
+description:
+ - Allow the syslog settings to be configured for an individual E-Series storage-system
+version_added: '2.7'
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ description:
+ - Add or remove the syslog server configuration for E-Series storage array.
+ - Existing syslog server configuration will be removed or updated when its address matches I(address).
+      - Fully qualified hostnames that resolve to an IPv4 address matching I(address) will not be
+        treated as a match.
+ choices:
+ - present
+ - absent
+ type: str
+ default: present
+ address:
+ description:
+ - The syslog server's IPv4 address or a fully qualified hostname.
+ - All existing syslog configurations will be removed when I(state=absent) and I(address=None).
+ type: str
+ port:
+ description:
+ - This is the port the syslog server is using.
+ default: 514
+ type: int
+ protocol:
+ description:
+ - This is the transmission protocol the syslog server's using to receive syslog messages.
+ choices:
+ - udp
+ - tcp
+ - tls
+ default: udp
+ type: str
+ components:
+ description:
+ - The e-series logging components define the specific logs to transfer to the syslog server.
+ - At the time of writing, 'auditLog' is the only logging component but more may become available.
+ default: ["auditLog"]
+ type: list
+ test:
+ description:
+ - This forces a test syslog message to be sent to the stated syslog server.
+ - Only attempts transmission when I(state=present).
+ type: bool
+ default: no
+ log_path:
+ description:
+ - This argument specifies a local path for logging purposes.
+ type: str
+ required: no
+notes:
+ - Check mode is supported.
+ - This API is currently only supported with the Embedded Web Services API v2.12 (bundled with
+ SANtricity OS 11.40.2) and higher.
+"""
+
+EXAMPLES = """
+ - name: Add two syslog server configurations to NetApp E-Series storage array.
+ netapp_e_syslog:
+ state: present
+ address: "{{ item }}"
+ port: 514
+ protocol: tcp
+        components: ["auditLog"]
+ api_url: "10.1.1.1:8443"
+ api_username: "admin"
+ api_password: "myPass"
+ loop:
+ - "192.168.1.1"
+ - "192.168.1.100"
+"""
+
+RETURN = """
+msg:
+ description: Success message
+ returned: on success
+ type: str
+ sample: The settings have been updated.
+syslog:
+ description:
+ - True if syslog server configuration has been added to e-series storage array.
+ returned: on success
+ sample: True
+ type: bool
+"""
+
+import json
+import logging
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+}
+
+
class Syslog(object):
    """Manage the syslog server configuration of an E-Series storage array.

    Determines whether the requested syslog entry (matched by serverAddress) already exists
    and adds, updates, or removes entries via the storage system's REST syslog endpoint.
    """

    def __init__(self):
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(dict(
            state=dict(choices=["present", "absent"], required=False, default="present"),
            address=dict(type="str", required=False),
            port=dict(type="int", default=514, required=False),
            protocol=dict(choices=["tcp", "tls", "udp"], default="udp", required=False),
            components=dict(type="list", required=False, default=["auditLog"]),
            # Fixed typo: was require=False, which is not a valid argument-spec key.
            test=dict(type="bool", default=False, required=False),
            log_path=dict(type="str", required=False),
        ))

        required_if = [
            ["state", "present", ["address", "port", "protocol", "components"]],
        ]

        # NOTE(review): "absent" is not a module parameter, so this exclusion is effectively a
        # no-op; the intent appears to be preventing I(test) with I(state=absent) -- confirm.
        mutually_exclusive = [
            ["test", "absent"],
        ]

        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if,
                                    mutually_exclusive=mutually_exclusive)
        args = self.module.params

        # Desired state: True when the syslog server entry should exist.
        self.syslog = args["state"] in ["present"]
        self.address = args["address"]
        self.port = args["port"]
        self.protocol = args["protocol"]
        self.components = args["components"]
        self.test = args["test"]
        self.ssid = args["ssid"]
        self.url = args["api_url"]
        self.creds = dict(url_password=args["api_password"],
                          validate_certs=args["validate_certs"],
                          url_username=args["api_username"], )

        # Sorted so later component comparisons are order-independent.
        self.components.sort()

        self.check_mode = self.module.check_mode

        # logging setup
        log_path = args["log_path"]
        self._logger = logging.getLogger(self.__class__.__name__)
        if log_path:
            logging.basicConfig(
                level=logging.DEBUG, filename=log_path, filemode='w',
                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')

        if not self.url.endswith('/'):
            self.url += '/'

    def get_configuration(self):
        """Retrieve the list of existing syslog server configurations from the array."""
        try:
            (rc, result) = request(self.url + "storage-systems/{0}/syslog".format(self.ssid),
                                   headers=HEADERS, **self.creds)
            return result
        except Exception as err:
            self.module.fail_json(msg="Failed to retrieve syslog configuration! Array Id [%s]. Error [%s]."
                                      % (self.ssid, to_native(err)))

    def test_configuration(self, body):
        """Send a test syslog message through the storage array.

        :param dict body: syslog server configuration containing the entry's "id".
        """
        try:
            (rc, result) = request(self.url + "storage-systems/{0}/syslog/{1}/test".format(self.ssid, body["id"]),
                                   method='POST', headers=HEADERS, **self.creds)
        except Exception as err:
            self.module.fail_json(
                msg="We failed to send test message! Array Id [{0}]. Error [{1}].".format(self.ssid, to_native(err)))

    def update_configuration(self):
        """Add, update or remove the syslog server configuration as required.

        :return bool: whether any configuration request was (or would be) issued.
        """
        config_match = None   # existing entry whose address matches I(address)
        perfect_match = None  # matching entry that also agrees on port, protocol and components
        update = False
        body = dict()

        # search existing configuration for syslog server entry match
        configs = self.get_configuration()
        if self.address:
            for config in configs:
                if config["serverAddress"] == self.address:
                    config_match = config
                    if (config["port"] == self.port and config["protocol"] == self.protocol and
                            len(config["components"]) == len(self.components) and
                            all([component["type"] in self.components for component in config["components"]])):
                        perfect_match = config_match
                        break

        # generate body for the http request
        if self.syslog:
            if not perfect_match:
                update = True
                if config_match:
                    body.update(dict(id=config_match["id"]))
                components = [dict(type=component_type) for component_type in self.components]
                body.update(dict(serverAddress=self.address, port=self.port,
                                 protocol=self.protocol, components=components))
                self._logger.info(body)
                self.make_configuration_request(body)

        # remove specific syslog server configuration
        elif self.address:
            # Only issue a delete when the address matches an existing entry; previously a
            # missing match resulted in a TypeError on config_match["id"].
            if config_match:
                update = True
                body.update(dict(id=config_match["id"]))
                self._logger.info(body)
                self.make_configuration_request(body)

        # if no address is specified, remove all syslog server configurations
        elif configs:
            update = True
            for config in configs:
                body.update(dict(id=config["id"]))
                self._logger.info(body)
                self.make_configuration_request(body)

        return update

    def make_configuration_request(self, body):
        """Issue the add/update or delete request described by *body* (no-op in check mode)."""
        if not self.check_mode:
            try:
                if self.syslog:
                    if "id" in body:
                        (rc, result) = request(
                            self.url + "storage-systems/{0}/syslog/{1}".format(self.ssid, body["id"]),
                            method='POST', data=json.dumps(body), headers=HEADERS, **self.creds)
                    else:
                        (rc, result) = request(self.url + "storage-systems/{0}/syslog".format(self.ssid),
                                               method='POST', data=json.dumps(body), headers=HEADERS, **self.creds)
                    body.update(result)

                    # send syslog test message
                    if self.test:
                        self.test_configuration(body)

                elif "id" in body:
                    (rc, result) = request(self.url + "storage-systems/{0}/syslog/{1}".format(self.ssid, body["id"]),
                                           method='DELETE', headers=HEADERS, **self.creds)

            # This is going to catch cases like a connection failure
            except Exception as err:
                self.module.fail_json(msg="We failed to modify syslog configuration! Array Id [%s]. Error [%s]."
                                          % (self.ssid, to_native(err)))

    def update(self):
        """Update configuration and respond to ansible."""
        update = self.update_configuration()
        self.module.exit_json(msg="The syslog settings have been updated.", changed=update)

    def __call__(self, *args, **kwargs):
        self.update()
+
+
def main():
    """Entry point: instantiate the syslog settings handler and run it."""
    syslog_settings = Syslog()
    syslog_settings()


if __name__ == "__main__":
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume.py
new file mode 100644
index 00000000..0bac2cca
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume.py
@@ -0,0 +1,868 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_volume
+version_added: "2.2"
+short_description: NetApp E-Series manage storage volumes (standard and thin)
+description:
+ - Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays.
+author:
+ - Kevin Hulquest (@hulquest)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ state:
+ description:
+ - Whether the specified volume should exist
+ required: true
+ type: str
+ choices: ['present', 'absent']
+ name:
+ description:
+ - The name of the volume to manage.
+ type: str
+ required: true
+ storage_pool_name:
+ description:
+ - Required only when requested I(state=='present').
+ - Name of the storage pool wherein the volume should reside.
+ type: str
+ required: false
+ size_unit:
+ description:
+ - The unit used to interpret the size parameter
+ choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
+ type: str
+ default: 'gb'
+ size:
+ description:
+ - Required only when I(state=='present').
+ - Size of the volume in I(size_unit).
+ - Size of the virtual volume in the case of a thin volume in I(size_unit).
+ - Maximum virtual volume size of a thin provisioned volume is 256tb; however other OS-level restrictions may
+ exist.
+ type: float
+ required: true
+ segment_size_kb:
+ description:
+ - Segment size of the volume
+ - All values are in kibibytes.
+ - Some common choices include '8', '16', '32', '64', '128', '256', and '512' but options are system
+ dependent.
+ - Retrieve the definitive system list from M(netapp_eseries.santricity.netapp_e_facts) under segment_sizes.
+ - When the storage pool is a raidDiskPool then the segment size must be 128kb.
+ - Segment size migrations are not allowed in this module
+ type: int
+ default: '128'
+ thin_provision:
+ description:
+ - Whether the volume should be thin provisioned.
+ - Thin volumes can only be created when I(raid_level=="raidDiskPool").
+ - Generally, use of thin-provisioning is not recommended due to performance impacts.
+ type: bool
+ default: false
+ thin_volume_repo_size:
+ description:
+ - This value (in size_unit) sets the allocated space for the thin provisioned repository.
+ - Initial value must between or equal to 4gb and 256gb in increments of 4gb.
+ - During expansion operations the increase must be between or equal to 4gb and 256gb in increments of 4gb.
+ - This option has no effect during expansion if I(thin_volume_expansion_policy=="automatic").
+      - Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic").
+ type: int
+ required: false
+ thin_volume_max_repo_size:
+ description:
+ - This is the maximum amount the thin volume repository will be allowed to grow.
+ - Only has significance when I(thin_volume_expansion_policy=="automatic").
+ - When the percentage I(thin_volume_repo_size) of I(thin_volume_max_repo_size) exceeds
+ I(thin_volume_growth_alert_threshold) then a warning will be issued and the storage array will execute
+ the I(thin_volume_expansion_policy) policy.
+ - Expansion operations when I(thin_volume_expansion_policy=="automatic") will increase the maximum
+ repository size.
+ - The default will be the same as size (in size_unit)
+ type: float
+ thin_volume_expansion_policy:
+ description:
+ - This is the thin volume expansion policy.
+ - When I(thin_volume_expansion_policy=="automatic") and I(thin_volume_growth_alert_threshold) is exceed the
+ I(thin_volume_max_repo_size) will be automatically expanded.
+ - When I(thin_volume_expansion_policy=="manual") and I(thin_volume_growth_alert_threshold) is exceeded the
+ storage system will wait for manual intervention.
+ - The thin volume_expansion policy can not be modified on existing thin volumes in this module.
+      - Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic").
+ choices: ["automatic", "manual"]
+ default: "automatic"
+ type: str
+ version_added: 2.8
+ thin_volume_growth_alert_threshold:
+ description:
+ - This is the thin provision repository utilization threshold (in percent).
+ - When the percentage of used storage of the maximum repository size exceeds this value then a alert will
+ be issued and the I(thin_volume_expansion_policy) will be executed.
+ - Values must be between or equal to 10 and 99.
+ default: 95
+ type: int
+ version_added: 2.8
+ owning_controller:
+ description:
+ - Specifies which controller will be the primary owner of the volume
+ - Not specifying will allow the controller to choose ownership.
+ required: false
+ choices: ["A", "B"]
+ type: str
+ version_added: 2.9
+ ssd_cache_enabled:
+ description:
+ - Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache defined)
+ - The default value is to ignore existing SSD cache setting.
+ type: bool
+ default: false
+ data_assurance_enabled:
+ description:
+ - Determines whether data assurance (DA) should be enabled for the volume
+ - Only available when creating a new volume and on a storage pool with drives supporting the DA capability.
+ type: bool
+ default: false
+ read_cache_enable:
+ description:
+ - Indicates whether read caching should be enabled for the volume.
+ type: bool
+ default: true
+ version_added: 2.8
+ read_ahead_enable:
+ description:
+ - Indicates whether or not automatic cache read-ahead is enabled.
+ - This option has no effect on thinly provisioned volumes since the architecture for thin volumes cannot
+ benefit from read ahead caching.
+ type: bool
+ default: true
+ version_added: 2.8
+ write_cache_enable:
+ description:
+ - Indicates whether write-back caching should be enabled for the volume.
+ type: bool
+ default: true
+ version_added: 2.8
+ cache_without_batteries:
+ description:
+ - Indicates whether caching should be used without battery backup.
+      - Warning, M(cache_without_batteries==true) and the storage system loses power and there is no battery backup, data will be lost!
+ type: bool
+ default: false
+ version_added: 2.9
+ workload_name:
+ description:
+ - Label for the workload defined by the metadata.
+ - When I(workload_name) and I(metadata) are specified then the defined workload will be added to the storage
+ array.
+ - When I(workload_name) exists on the storage array but the metadata is different then the workload
+ definition will be updated. (Changes will update all associated volumes!)
+ - Existing workloads can be retrieved using M(netapp_eseries.santricity.netapp_e_facts).
+ required: false
+ type: str
+ version_added: 2.8
+ metadata:
+ description:
+ - Dictionary containing meta data for the use, user, location, etc of the volume (dictionary is arbitrarily
+ defined for whatever the user deems useful)
+ - When I(workload_name) exists on the storage array but the metadata is different then the workload
+ definition will be updated. (Changes will update all associated volumes!)
+ - I(workload_name) must be specified when I(metadata) are defined.
+ type: dict
+ required: false
+ version_added: 2.8
+ wait_for_initialization:
+ description:
+ - Forces the module to wait for expansion operations to complete before continuing.
+ type: bool
+ default: false
+ version_added: 2.8
+ initialization_timeout:
+ description:
+ - Duration in seconds before the wait_for_initialization operation will terminate.
+      - I(wait_for_initialization=true) must be specified for this option to have any effect on the module's operations.
+ type: int
+ required: false
+ version_added: 2.9
+"""
+EXAMPLES = """
+- name: Create simple volume with workload tags (volume meta data)
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: present
+ name: volume
+ storage_pool_name: storage_pool
+ size: 300
+ size_unit: gb
+ workload_name: volume_tag
+ metadata:
+ key1: value1
+ key2: value2
+- name: Create a thin volume
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: present
+ name: volume1
+ storage_pool_name: storage_pool
+ size: 131072
+ size_unit: gb
+ thin_provision: true
+ thin_volume_repo_size: 32
+ thin_volume_max_repo_size: 1024
+- name: Expand thin volume's virtual size
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: present
+ name: volume1
+ storage_pool_name: storage_pool
+ size: 262144
+ size_unit: gb
+ thin_provision: true
+ thin_volume_repo_size: 32
+ thin_volume_max_repo_size: 1024
+- name: Expand thin volume's maximum repository size
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: present
+ name: volume1
+ storage_pool_name: storage_pool
+ size: 262144
+ size_unit: gb
+ thin_provision: true
+ thin_volume_repo_size: 32
+ thin_volume_max_repo_size: 2048
+- name: Delete volume
+ netapp_e_volume:
+ ssid: "{{ ssid }}"
+ api_url: "{{ netapp_api_url }}"
+ api_username: "{{ netapp_api_username }}"
+ api_password: "{{ netapp_api_password }}"
+ validate_certs: "{{ netapp_api_validate_certs }}"
+ state: absent
+ name: volume
+"""
+RETURN = """
+msg:
+ description: State of volume
+ type: str
+ returned: always
+ sample: "Standard volume [workload_vol_1] has been created."
+"""
+from time import sleep
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesVolume(NetAppESeriesModule):
+ VOLUME_CREATION_BLOCKING_TIMEOUT_SEC = 300
+
+ def __init__(self):
+ ansible_options = dict(
+ state=dict(required=True, choices=["present", "absent"]),
+ name=dict(required=True, type="str"),
+ storage_pool_name=dict(type="str"),
+ size_unit=dict(default="gb", choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"],
+ type="str"),
+ size=dict(type="float"),
+ segment_size_kb=dict(type="int", default=128),
+ owning_controller=dict(required=False, choices=['A', 'B']),
+ ssd_cache_enabled=dict(type="bool", default=False),
+ data_assurance_enabled=dict(type="bool", default=False),
+ thin_provision=dict(type="bool", default=False),
+ thin_volume_repo_size=dict(type="int"),
+ thin_volume_max_repo_size=dict(type="float"),
+ thin_volume_expansion_policy=dict(type="str", choices=["automatic", "manual"], default="automatic"),
+ thin_volume_growth_alert_threshold=dict(type="int", default=95),
+ read_cache_enable=dict(type="bool", default=True),
+ read_ahead_enable=dict(type="bool", default=True),
+ write_cache_enable=dict(type="bool", default=True),
+ cache_without_batteries=dict(type="bool", default=False),
+ workload_name=dict(type="str", required=False),
+ metadata=dict(type="dict", require=False),
+ wait_for_initialization=dict(type="bool", default=False),
+ initialization_timeout=dict(type="int", required=False))
+
+ required_if = [
+ ["state", "present", ["storage_pool_name", "size"]],
+ ["thin_provision", "true", ["thin_volume_repo_size"]]
+ ]
+
+ super(NetAppESeriesVolume, self).__init__(ansible_options=ansible_options,
+ web_services_version="02.00.0000.0000",
+ supports_check_mode=True,
+ required_if=required_if)
+
+ args = self.module.params
+ self.state = args["state"]
+ self.name = args["name"]
+ self.storage_pool_name = args["storage_pool_name"]
+ self.size_unit = args["size_unit"]
+ self.segment_size_kb = args["segment_size_kb"]
+ if args["size"]:
+ self.size_b = self.convert_to_aligned_bytes(args["size"])
+
+ self.owning_controller_id = None
+ if args["owning_controller"]:
+ self.owning_controller_id = "070000000000000000000001" if args["owning_controller"] == "A" else "070000000000000000000002"
+
+ self.read_cache_enable = args["read_cache_enable"]
+ self.read_ahead_enable = args["read_ahead_enable"]
+ self.write_cache_enable = args["write_cache_enable"]
+ self.ssd_cache_enabled = args["ssd_cache_enabled"]
+ self.cache_without_batteries = args["cache_without_batteries"]
+ self.data_assurance_enabled = args["data_assurance_enabled"]
+
+ self.thin_provision = args["thin_provision"]
+ self.thin_volume_expansion_policy = args["thin_volume_expansion_policy"]
+ self.thin_volume_growth_alert_threshold = int(args["thin_volume_growth_alert_threshold"])
+ self.thin_volume_repo_size_b = None
+ self.thin_volume_max_repo_size_b = None
+
+ if args["thin_volume_repo_size"]:
+ self.thin_volume_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_repo_size"])
+ if args["thin_volume_max_repo_size"]:
+ self.thin_volume_max_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_max_repo_size"])
+
+ self.workload_name = args["workload_name"]
+ self.metadata = args["metadata"]
+ self.wait_for_initialization = args["wait_for_initialization"]
+ self.initialization_timeout = args["initialization_timeout"]
+
+ # convert metadata to a list of dictionaries containing the keys "key" and "value" corresponding to
+ # each of the workload attributes dictionary entries
+ metadata = []
+ if self.metadata:
+ if not self.workload_name:
+ self.module.fail_json(msg="When metadata is specified then the name for the workload must be specified."
+ " Array [%s]." % self.ssid)
+ for key in self.metadata.keys():
+ metadata.append(dict(key=key, value=self.metadata[key]))
+ self.metadata = metadata
+
+ if self.thin_provision:
+ if not self.thin_volume_max_repo_size_b:
+ self.thin_volume_max_repo_size_b = self.size_b
+
+ if not self.thin_volume_expansion_policy:
+ self.thin_volume_expansion_policy = "automatic"
+
+ if self.size_b > 256 * 1024 ** 4:
+ self.module.fail_json(msg="Thin provisioned volumes must be less than or equal to 256tb is size."
+ " Attempted size [%sg]" % (self.size_b * 1024 ** 3))
+
+ if (self.thin_volume_repo_size_b and self.thin_volume_max_repo_size_b and
+ self.thin_volume_repo_size_b > self.thin_volume_max_repo_size_b):
+ self.module.fail_json(msg="The initial size of the thin volume must not be larger than the maximum"
+ " repository size. Array [%s]." % self.ssid)
+
+ if self.thin_volume_growth_alert_threshold < 10 or self.thin_volume_growth_alert_threshold > 99:
+ self.module.fail_json(msg="thin_volume_growth_alert_threshold must be between or equal to 10 and 99."
+ "thin_volume_growth_alert_threshold [%s]. Array [%s]."
+ % (self.thin_volume_growth_alert_threshold, self.ssid))
+
+ self.volume_detail = None
+ self.pool_detail = None
+ self.workload_id = None
+
+ def convert_to_aligned_bytes(self, size):
+ """Convert size to the truncated byte size that aligns on the segment size."""
+ size_bytes = int(size * self.SIZE_UNIT_MAP[self.size_unit])
+ segment_size_bytes = int(self.segment_size_kb * self.SIZE_UNIT_MAP["kb"])
+ segment_count = int(size_bytes / segment_size_bytes)
+ return segment_count * segment_size_bytes
+
+ def get_volume(self):
+ """Retrieve volume details from storage array."""
+ volumes = list()
+ thin_volumes = list()
+ try:
+ rc, volumes = self.request("storage-systems/%s/volumes" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to obtain list of thick volumes. Array Id [%s]. Error[%s]."
+ % (self.ssid, to_native(err)))
+ try:
+ rc, thin_volumes = self.request("storage-systems/%s/thin-volumes" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to obtain list of thin volumes. Array Id [%s]. Error[%s]."
+ % (self.ssid, to_native(err)))
+
+ volume_detail = [volume for volume in volumes + thin_volumes if volume["name"] == self.name]
+ return volume_detail[0] if volume_detail else dict()
+
+ def wait_for_volume_availability(self, retries=VOLUME_CREATION_BLOCKING_TIMEOUT_SEC / 5):
+ """Waits until volume becomes available.
+
+ :raises AnsibleFailJson when retries are exhausted.
+ """
+ if retries == 0:
+ self.module.fail_json(msg="Timed out waiting for the volume %s to become available. Array [%s]."
+ % (self.name, self.ssid))
+ if not self.get_volume():
+ sleep(5)
+ self.wait_for_volume_availability(retries=retries - 1)
+
    def wait_for_volume_action(self, timeout=None):
        """Poll the array until any long-lived action on this volume completes.

        :param: int timeout: Wait duration measured in seconds. Waits indefinitely when None.
        :raises AnsibleFailJson: when progress cannot be retrieved or the timeout expires.
        """
        action = "unknown"
        percent_complete = None
        # Poll every 5 seconds until no long-lived operation references this volume.
        while action != "complete":
            sleep(5)

            try:
                rc, operations = self.request("storage-systems/%s/symbol/getLongLivedOpsProgress" % self.ssid)

                # Search long lived operations for volume
                action = "complete"
                for operation in operations["longLivedOpsProgress"]:
                    if operation["volAction"] is not None:
                        # Match either the volume's own id or its underlying storage volume
                        # reference (present for thin volumes) against the operation entry.
                        for key in operation.keys():
                            if (operation[key] is not None and "volumeRef" in operation[key] and
                                    (operation[key]["volumeRef"] == self.volume_detail["id"] or
                                     ("storageVolumeRef" in self.volume_detail and operation[key]["volumeRef"] == self.volume_detail["storageVolumeRef"]))):
                                action = operation["volAction"]
                                percent_complete = operation["init"]["percentComplete"]
            except Exception as err:
                self.module.fail_json(msg="Failed to get volume expansion progress. Volume [%s]. Array Id [%s]."
                                          " Error[%s]." % (self.name, self.ssid, to_native(err)))

            # Decrement the remaining time by the poll interval; fail once it runs out.
            if timeout is not None:
                if timeout <= 0:
                    self.module.warn("Expansion action, %s, failed to complete during the allotted time. Time remaining"
                                     " [%s]. Array Id [%s]." % (action, percent_complete, self.ssid))
                    self.module.fail_json(msg="Expansion action failed to complete. Time remaining [%s]. Array Id [%s]." % (percent_complete, self.ssid))
                if timeout:
                    timeout -= 5

            self.module.log("Expansion action, %s, is %s complete." % (action, percent_complete))
        self.module.log("Expansion action is complete.")
+
+ def get_storage_pool(self):
+ """Retrieve storage pool details from the storage array."""
+ storage_pools = list()
+ try:
+ rc, storage_pools = self.request("storage-systems/%s/storage-pools" % self.ssid)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]."
+ % (self.ssid, to_native(err)))
+
+ pool_detail = [storage_pool for storage_pool in storage_pools if storage_pool["name"] == self.storage_pool_name]
+ return pool_detail[0] if pool_detail else dict()
+
+ def check_storage_pool_sufficiency(self):
+ """Perform a series of checks as to the sufficiency of the storage pool for the volume."""
+ if not self.pool_detail:
+ self.module.fail_json(msg='Requested storage pool (%s) not found' % self.storage_pool_name)
+
+ if not self.volume_detail:
+ if self.thin_provision and not self.pool_detail['diskPool']:
+ self.module.fail_json(msg='Thin provisioned volumes can only be created on raid disk pools.')
+
+ if (self.data_assurance_enabled and not
+ (self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"] and
+ self.pool_detail["protectionInformationCapabilities"]["protectionType"] == "type2Protection")):
+ self.module.fail_json(msg="Data Assurance (DA) requires the storage pool to be DA-compatible."
+ " Array [%s]." % self.ssid)
+
+ if int(self.pool_detail["freeSpace"]) < self.size_b and not self.thin_provision:
+ self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs."
+ " Array [%s]." % self.ssid)
+ else:
+ # Check for expansion
+ if (int(self.pool_detail["freeSpace"]) < int(self.volume_detail["totalSizeInBytes"]) - self.size_b and
+ not self.thin_provision):
+ self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs."
+ " Array [%s]." % self.ssid)
+
    def update_workload_tags(self, check_mode=False):
        """Check the status of the workload tag and update storage array definitions if necessary.

        When the workload attributes are not provided but an existing workload tag name is, then the attributes will be
        used.

        :param bool check_mode: when True, only report whether a change is needed without applying it.
        :return bool: Whether changes were required to be made."""
        change_required = False
        workload_tags = None
        request_body = None
        ansible_profile_id = None

        if self.workload_name:
            try:
                rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid)
            except Exception as error:
                self.module.fail_json(msg="Failed to retrieve storage array workload tags. Array [%s]" % self.ssid)

            # Generate common indexed Ansible workload tag
            current_tag_index_list = [int(pair["value"].replace("ansible_workload_", ""))
                                      for tag in workload_tags for pair in tag["workloadAttributes"]
                                      if pair["key"] == "profileId" and "ansible_workload_" in pair["value"] and
                                      str(pair["value"]).replace("ansible_workload_", "").isdigit()]

            # Use the next free index so generated profile ids stay unique on the array.
            tag_index = 1
            if current_tag_index_list:
                tag_index = max(current_tag_index_list) + 1

            ansible_profile_id = "ansible_workload_%d" % tag_index
            request_body = dict(name=self.workload_name,
                                profileId=ansible_profile_id,
                                workloadInstanceIndex=None,
                                isValid=True)

            # evaluate and update storage array when needed
            for tag in workload_tags:
                if tag["name"] == self.workload_name:
                    self.workload_id = tag["id"]

                    # No metadata supplied: an existing tag with the right name suffices.
                    if not self.metadata:
                        break

                    # Determine if core attributes (everything but profileId) is the same
                    metadata_set = set(tuple(sorted(attr.items())) for attr in self.metadata)
                    tag_set = set(tuple(sorted(attr.items()))
                                  for attr in tag["workloadAttributes"] if attr["key"] != "profileId")
                    if metadata_set != tag_set:
                        self.module.log("Workload tag change is required!")
                        change_required = True

                    # only perform the required action when check_mode==False
                    if change_required and not check_mode:
                        self.metadata.append(dict(key="profileId", value=ansible_profile_id))
                        request_body.update(dict(isNewWorkloadInstance=False,
                                                 isWorkloadDataInitialized=True,
                                                 isWorkloadCardDataToBeReset=True,
                                                 workloadAttributes=self.metadata))
                        try:
                            rc, resp = self.request("storage-systems/%s/workloads/%s" % (self.ssid, tag["id"]),
                                                    data=request_body, method="POST")
                        except Exception as error:
                            self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]"
                                                  % (self.ssid, to_native(error)))
                        self.module.log("Workload tag [%s] required change." % self.workload_name)
                    break

            # existing workload tag not found so create new workload tag
            # (for/else: runs only when the loop finished without a break)
            else:
                change_required = True
                self.module.log("Workload tag creation is required!")

                if change_required and not check_mode:
                    if self.metadata:
                        self.metadata.append(dict(key="profileId", value=ansible_profile_id))
                    else:
                        self.metadata = [dict(key="profileId", value=ansible_profile_id)]

                    request_body.update(dict(isNewWorkloadInstance=True,
                                             isWorkloadDataInitialized=False,
                                             isWorkloadCardDataToBeReset=False,
                                             workloadAttributes=self.metadata))
                    try:
                        rc, resp = self.request("storage-systems/%s/workloads" % self.ssid,
                                                method="POST", data=request_body)
                        self.workload_id = resp["id"]
                    except Exception as error:
                        self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]"
                                              % (self.ssid, to_native(error)))
                    self.module.log("Workload tag [%s] was added." % self.workload_name)

        return change_required
+
    def get_volume_property_changes(self):
        """Retrieve the volume update request body when change(s) are required.

        :raise AnsibleFailJson when attempting to change segment size on existing volume.
        :return dict: request body when change(s) to a volume's properties are required.
        """
        change = False
        request_body = dict(flashCache=self.ssd_cache_enabled, metaTags=[],
                            cacheSettings=dict(readCacheEnable=self.read_cache_enable,
                                               writeCacheEnable=self.write_cache_enable))

        # check for invalid modifications
        if self.segment_size_kb * 1024 != int(self.volume_detail["segmentSize"]):
            self.module.fail_json(msg="Existing volume segment size is %s and cannot be modified."
                                      % self.volume_detail["segmentSize"])

        # common thick/thin volume properties
        if (self.read_cache_enable != self.volume_detail["cacheSettings"]["readCacheEnable"] or
                self.write_cache_enable != self.volume_detail["cacheSettings"]["writeCacheEnable"] or
                self.ssd_cache_enabled != self.volume_detail["flashCached"]):
            change = True

        # controller ownership
        if self.owning_controller_id and self.owning_controller_id != self.volume_detail["preferredManager"]:
            change = True
            request_body.update(dict(owningControllerId=self.owning_controller_id))

        # workload tagging: tag the volume when a workload is requested, untag when one
        # exists on the volume but none was requested.
        if self.workload_name:
            request_body.update(dict(metaTags=[dict(key="workloadId", value=self.workload_id),
                                               dict(key="volumeTypeId", value="volume")]))
            if {"key": "workloadId", "value": self.workload_id} not in self.volume_detail["metadata"]:
                change = True
        elif self.volume_detail["metadata"]:
            change = True

        # thick/thin volume specific properties
        if self.thin_provision:
            if self.thin_volume_growth_alert_threshold != int(self.volume_detail["growthAlertThreshold"]):
                change = True
                request_body.update(dict(growthAlertThreshold=self.thin_volume_growth_alert_threshold))
            if self.thin_volume_expansion_policy != self.volume_detail["expansionPolicy"]:
                change = True
                request_body.update(dict(expansionPolicy=self.thin_volume_expansion_policy))
        else:
            # readAheadMultiplier > 0 is how the array reports read-ahead enabled.
            if self.read_ahead_enable != (int(self.volume_detail["cacheSettings"]["readAheadMultiplier"]) > 0):
                change = True
                request_body["cacheSettings"].update(dict(readAheadEnable=self.read_ahead_enable))
            if self.cache_without_batteries != self.volume_detail["cacheSettings"]["cwob"]:
                change = True
                request_body["cacheSettings"].update(dict(cacheWithoutBatteries=self.cache_without_batteries))

        return request_body if change else dict()
+
+ def get_expand_volume_changes(self):
+ """Expand the storage specifications for the existing thick/thin volume.
+
+ :raise AnsibleFailJson when a thick/thin volume expansion request fails.
+ :return dict: dictionary containing all the necessary values for volume expansion request
+ """
+ request_body = dict()
+
+ if self.size_b < int(self.volume_detail["capacity"]):
+ self.module.fail_json(msg="Reducing the size of volumes is not permitted. Volume [%s]. Array [%s]"
+ % (self.name, self.ssid))
+
+ if self.volume_detail["thinProvisioned"]:
+ if self.size_b > int(self.volume_detail["capacity"]):
+ request_body.update(dict(sizeUnit="bytes", newVirtualSize=self.size_b))
+ self.module.log("Thin volume virtual size have been expanded.")
+
+ if self.volume_detail["expansionPolicy"] == "automatic":
+ if self.thin_volume_max_repo_size_b > int(self.volume_detail["provisionedCapacityQuota"]):
+ request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_max_repo_size_b))
+ self.module.log("Thin volume maximum repository size have been expanded (automatic policy).")
+
+ elif self.volume_detail["expansionPolicy"] == "manual":
+ if self.thin_volume_repo_size_b > int(self.volume_detail["currentProvisionedCapacity"]):
+ change = self.thin_volume_repo_size_b - int(self.volume_detail["currentProvisionedCapacity"])
+ if change < 4 * 1024 ** 3 or change > 256 * 1024 ** 3 or change % (4 * 1024 ** 3) != 0:
+ self.module.fail_json(msg="The thin volume repository increase must be between or equal to 4gb"
+ " and 256gb in increments of 4gb. Attempted size [%sg]."
+ % (self.thin_volume_repo_size_b * 1024 ** 3))
+
+ request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_repo_size_b))
+ self.module.log("Thin volume maximum repository size have been expanded (manual policy).")
+
+ elif self.size_b > int(self.volume_detail["capacity"]):
+ request_body.update(dict(sizeUnit="bytes", expansionSize=self.size_b))
+ self.module.log("Volume storage capacities have been expanded.")
+
+ return request_body
+
+ def create_volume(self):
+ """Create thick/thin volume according to the specified criteria."""
+ body = dict(name=self.name, poolId=self.pool_detail["id"], sizeUnit="bytes",
+ dataAssuranceEnabled=self.data_assurance_enabled)
+
+ if self.thin_provision:
+ body.update(dict(virtualSize=self.size_b,
+ repositorySize=self.thin_volume_repo_size_b,
+ maximumRepositorySize=self.thin_volume_max_repo_size_b,
+ expansionPolicy=self.thin_volume_expansion_policy,
+ growthAlertThreshold=self.thin_volume_growth_alert_threshold))
+ try:
+ rc, volume = self.request("storage-systems/%s/thin-volumes" % self.ssid, data=body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(error)))
+
+ self.module.log("New thin volume created [%s]." % self.name)
+
+ else:
+ body.update(dict(size=self.size_b, segSize=self.segment_size_kb))
+ try:
+ rc, volume = self.request("storage-systems/%s/volumes" % self.ssid, data=body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to create volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(error)))
+
+ self.module.log("New volume created [%s]." % self.name)
+
+ def update_volume_properties(self):
+ """Update existing thin-volume or volume properties.
+
+ :raise AnsibleFailJson when either thick/thin volume update request fails.
+ :return bool: whether update was applied
+ """
+ self.wait_for_volume_availability()
+ self.volume_detail = self.get_volume()
+
+ request_body = self.get_volume_property_changes()
+
+ if request_body:
+ if self.thin_provision:
+ try:
+ rc, resp = self.request("storage-systems/%s/thin-volumes/%s"
+ % (self.ssid, self.volume_detail["id"]), data=request_body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update thin volume properties. Volume [%s]. Array Id [%s]."
+ " Error[%s]." % (self.name, self.ssid, to_native(error)))
+ else:
+ try:
+ rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]),
+ data=request_body, method="POST")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to update volume properties. Volume [%s]. Array Id [%s]."
+ " Error[%s]." % (self.name, self.ssid, to_native(error)))
+ return True
+ return False
+
+ def expand_volume(self):
+ """Expand the storage specifications for the existing thick/thin volume.
+
+ :raise AnsibleFailJson when a thick/thin volume expansion request fails.
+ """
+ request_body = self.get_expand_volume_changes()
+ if request_body:
+ if self.volume_detail["thinProvisioned"]:
+ try:
+ rc, resp = self.request("storage-systems/%s/thin-volumes/%s/expand"
+ % (self.ssid, self.volume_detail["id"]), data=request_body, method="POST")
+ except Exception as err:
+ self.module.fail_json(msg="Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(err)))
+ self.module.log("Thin volume specifications have been expanded.")
+
+ else:
+ try:
+ rc, resp = self.request(
+ "storage-systems/%s/volumes/%s/expand" % (self.ssid, self.volume_detail['id']),
+ data=request_body, method="POST")
+ except Exception as err:
+ self.module.fail_json(msg="Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(err)))
+
+ self.module.log("Volume storage capacities have been expanded.")
+
+ def delete_volume(self):
+ """Delete existing thin/thick volume."""
+ if self.thin_provision:
+ try:
+ rc, resp = self.request("storage-systems/%s/thin-volumes/%s" % (self.ssid, self.volume_detail["id"]),
+ method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to delete thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(error)))
+ self.module.log("Thin volume deleted [%s]." % self.name)
+ else:
+ try:
+ rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]),
+ method="DELETE")
+ except Exception as error:
+ self.module.fail_json(msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]."
+ % (self.name, self.ssid, to_native(error)))
+ self.module.log("Volume deleted [%s]." % self.name)
+
    def apply(self):
        """Determine and apply any changes necessary to satisfy the specified criteria.

        :raise AnsibleExitJson when completes successfully"""
        change = False
        msg = None

        self.volume_detail = self.get_volume()
        self.pool_detail = self.get_storage_pool()

        # Determine whether changes need to be applied to existing workload tags
        if self.state == 'present' and self.update_workload_tags(check_mode=True):
            change = True

        # Determine if any changes need to be applied
        if self.volume_detail:
            if self.state == 'absent':
                change = True

            elif self.state == 'present':
                # Existing volume: a change is needed when an expansion or property update is pending.
                if self.get_expand_volume_changes() or self.get_volume_property_changes():
                    change = True

        elif self.state == 'present':
            # New thin volumes must start with a repository between 4gb and 256gb in 4gb steps.
            # NOTE(review): the reported size multiplies bytes by 2**30; division was
            # presumably intended to show gigabytes — confirm before relying on the text.
            if self.thin_provision and (self.thin_volume_repo_size_b < 4 * 1024 ** 3 or
                                        self.thin_volume_repo_size_b > 256 * 1024 ** 3 or
                                        self.thin_volume_repo_size_b % (4 * 1024 ** 3) != 0):
                self.module.fail_json(msg="The initial thin volume repository size must be between 4gb and 256gb in"
                                          " increments of 4gb. Attempted size [%sg]."
                                          % (self.thin_volume_repo_size_b * 1024 ** 3))
            change = True

        self.module.log("Update required: [%s]." % change)

        # Apply any necessary changes
        if change and not self.module.check_mode:
            if self.state == 'present':
                if self.update_workload_tags():
                    msg = "Workload tag change occurred."

                if not self.volume_detail:
                    self.check_storage_pool_sufficiency()
                    self.create_volume()
                    self.update_volume_properties()
                    # msg[:-1] drops the trailing period before appending the extra clause.
                    msg = msg[:-1] + " and volume [%s] was created." if msg else "Volume [%s] has been created."
                else:
                    if self.update_volume_properties():
                        msg = "Volume [%s] properties were updated."

                    if self.get_expand_volume_changes():
                        self.expand_volume()
                        msg = msg[:-1] + " and was expanded." if msg else "Volume [%s] was expanded."

                    if self.wait_for_initialization:
                        self.module.log("Waiting for volume operation to complete.")
                        self.wait_for_volume_action(timeout=self.initialization_timeout)

            elif self.state == 'absent':
                self.delete_volume()
                msg = "Volume [%s] has been deleted."

        else:
            msg = "Volume [%s] does not exist." if self.state == 'absent' else "Volume [%s] exists."

        # Substitute the volume name only when the message actually contains a placeholder.
        self.module.exit_json(msg=(msg % self.name if msg and "%s" in msg else msg), changed=change)
+
+
def main():
    """Module entry point: build the volume manager and apply the requested state."""
    volume = NetAppESeriesVolume()
    volume.apply()


if __name__ == '__main__':
    main()
diff --git a/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume_copy.py b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume_copy.py
new file mode 100644
index 00000000..a6748a54
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume_copy.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['deprecated'],
+ 'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_volume_copy
+short_description: NetApp E-Series create volume copy pairs
+description:
+ - Create and delete snapshots images on volume groups for NetApp E-series storage arrays.
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+ ssid:
+ description:
+ - Storage system identifier
+ type: str
+ default: '1'
+ api_username:
+ required: true
+ description:
+ - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ api_password:
+ required: true
+ description:
+ - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+ type: str
+ api_url:
+ required: true
+ description:
+ - The url to the SANtricity WebServices Proxy or embedded REST API, for example C(https://prod-1.wahoo.acme.com/devmgr/v2).
+ type: str
+ validate_certs:
+ required: false
+ default: true
+ type: bool
+ description:
+ - Should https certificates be validated?
+ source_volume_id:
+ description:
+ - The id of the volume copy source.
+ - If used, must be paired with destination_volume_id
+ - Mutually exclusive with volume_copy_pair_id, and search_volume_id
+ type: str
+ destination_volume_id:
+ description:
+ - The id of the volume copy destination.
+ - If used, must be paired with source_volume_id
+ - Mutually exclusive with volume_copy_pair_id, and search_volume_id
+ type: str
+ volume_copy_pair_id:
+ description:
+ - The id of a given volume copy pair
+ - Mutually exclusive with destination_volume_id, source_volume_id, and search_volume_id
+ - Can use to delete or check presence of volume pairs
+ - Must specify this or (destination_volume_id and source_volume_id)
+ type: str
+ state:
+ description:
+ - Whether the specified volume copy pair should exist or not.
+ required: True
+ choices: ['present', 'absent']
+ type: str
+ create_copy_pair_if_does_not_exist:
+ description:
+ - Defines if a copy pair will be created if it does not exist.
+ - If set to True destination_volume_id and source_volume_id are required.
+ type: bool
+ default: True
+ start_stop_copy:
+ description:
+ - starts a re-copy or stops a copy in progress
+ - "Note: If you stop the initial file copy before it it done the copy pair will be destroyed"
+ - Requires volume_copy_pair_id
+ type: str
+ choices: ['start', 'stop']
+ search_volume_id:
+ description:
+ - Searches for all valid potential target and source volumes that could be used in a copy_pair
+ - Mutually exclusive with volume_copy_pair_id, destination_volume_id and source_volume_id
+ type: str
+ copy_priority:
+ description:
+ - Copy priority level
+ required: False
+ default: 0
+ type: int
+ onlineCopy:
+ description:
+ - Whether copy should be online
+ required: False
+ default: False
+ type: bool
+ targetWriteProtected:
+ description:
+ - Whether target should be write protected
+ required: False
+ default: True
+ type: bool
+"""
# BUG FIX: the original EXAMPLES block was a pasted copy of the RETURN
# documentation (the `msg:` return spec) instead of example tasks.
EXAMPLES = """
---
- name: Ensure a volume copy pair exists
  netapp_e_volume_copy:
    ssid: "1"
    api_url: "https://192.168.1.100:8443/devmgr/v2"
    api_username: admin
    api_password: adminpass
    validate_certs: true
    state: present
    source_volume_id: "02000000600A098000843B8F00003D065D430118"
    destination_volume_id: "02000000600A098000843B8F00003D075D430119"
"""
+RETURN = """
+msg:
+ description: Success message
+ returned: success
+ type: str
+ sample: Created Volume Copy Pair with ID
+"""
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request
+
# Common HTTP headers sent with every SANtricity REST request (JSON in, JSON out).
HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}
+
+
def find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(params):
    """Look up the id of the copy pair whose source AND destination match the params.

    Returns the copy-pair id string, or None when no matching pair exists.

    BUG FIX: the original repeated the sourceVolume comparison twice, so the
    destination volume was never checked and any pair sharing the source matched.
    The inner test now compares the pair's target volume against
    destination_volume_id.
    """
    get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
    url = params['api_url'] + get_status

    (rc, resp) = request(url, method='GET', url_username=params['api_username'],
                         url_password=params['api_password'], headers=HEADERS,
                         validate_certs=params['validate_certs'])

    volume_copy_pair_id = None
    for potential_copy_pair in resp:
        if potential_copy_pair['sourceVolume'] == params['source_volume_id']:
            # NOTE(review): 'targetVolume' is the expected field name for the
            # destination in the volume-copy-jobs response -- confirm against the
            # SANtricity REST API reference.
            if potential_copy_pair['targetVolume'] == params['destination_volume_id']:
                volume_copy_pair_id = potential_copy_pair['id']

    return volume_copy_pair_id
+
+
def create_copy_pair(params):
    """Create a new volume copy job from source_volume_id to destination_volume_id.

    Returns (True, (rc, resp)) on HTTP 200, otherwise (False, (rc, resp)).

    NOTE(review): the documented copy_priority/onlineCopy/targetWriteProtected
    options are never included in this payload -- confirm whether that is intended.
    """
    url = params['api_url'] + 'storage-systems/%s/volume-copy-jobs' % params['ssid']

    payload = json.dumps({"sourceId": params['source_volume_id'],
                          "targetId": params['destination_volume_id']})

    (rc, resp) = request(url, data=payload, ignore_errors=True, method='POST',
                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
                         validate_certs=params['validate_certs'])
    return rc == 200, (rc, resp)
+
+
def delete_copy_pair_by_copy_pair_id(params):
    """Delete the copy job identified by volume_copy_pair_id (repositories not retained).

    Returns (True, (rc, resp)) on HTTP 204, otherwise (False, (rc, resp)).
    """
    endpoint = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
        params['ssid'], params['volume_copy_pair_id'])
    url = params['api_url'] + endpoint

    (rc, resp) = request(url, ignore_errors=True, method='DELETE',
                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
                         validate_certs=params['validate_certs'])
    return rc == 204, (rc, resp)
+
+
def find_volume_copy_pair_id_by_volume_copy_pair_id(params):
    """Verify that a volume copy pair with the given id exists.

    Returns (True, (rc, resp)) when the pair exists (HTTP 200), otherwise
    (False, (rc, resp)); callers inspect rc == 404 for "not found".

    BUG FIX: the original issued a DELETE (with retainRepositories=false) to the
    copy-job endpoint, so "checking" for the pair actually destroyed it and then
    reported failure (DELETE returns 204, not the expected 200).  An existence
    check must be a read-only GET.
    """
    get_status = 'storage-systems/%s/volume-copy-jobs/%s' % (
        params['ssid'], params['volume_copy_pair_id'])
    url = params['api_url'] + get_status

    (rc, resp) = request(url, ignore_errors=True, method='GET',
                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
                         validate_certs=params['validate_certs'])
    if rc != 200:
        return False, (rc, resp)
    else:
        return True, (rc, resp)
+
+
def start_stop_copy(params):
    """Issue a start or stop control request for an existing volume copy pair.

    Returns (True, percentComplete) on HTTP 200, otherwise (False, response_data).
    """
    endpoint = 'storage-systems/%s/volume-copy-jobs-control/%s?control=%s' % (
        params['ssid'], params['volume_copy_pair_id'], params['start_stop_copy'])

    (code, data) = request(params['api_url'] + endpoint, ignore_errors=True, method='POST',
                           url_username=params['api_username'], url_password=params['api_password'],
                           headers=HEADERS,
                           validate_certs=params['validate_certs'])

    if code != 200:
        return False, data
    # Success responses carry a list; the first element holds the job progress.
    return True, data[0]['percentComplete']
+
+
def check_copy_status(params):
    """Query the progress of a volume copy job.

    Returns (True, percent) while a copy is running, (False, -1) when idle
    (the API reports percentComplete == -1), or (False, response_data) on a
    non-200 response.
    """
    endpoint = 'storage-systems/%s/volume-copy-jobs-control/%s' % (
        params['ssid'], params['volume_copy_pair_id'])

    (code, data) = request(params['api_url'] + endpoint, ignore_errors=True, method='GET',
                           url_username=params['api_username'], url_password=params['api_password'],
                           headers=HEADERS,
                           validate_certs=params['validate_certs'])

    if code != 200:
        return False, data
    percent = data['percentComplete']
    return (percent != -1), percent
+
+
def find_valid_copy_pair_targets_and_sources(params):
    """List volumes that could act as copy-pair targets or sources for search_volume_id.

    Volumes already participating in a copy pair (volumeCopyTarget or
    volumeCopySource set) are excluded.  Volumes strictly larger than the search
    volume are potential targets; the remainder are potential sources.

    Returns (potential_targets, potential_sources) as lists of volume ids.
    Raises Exception on a non-200 response or when search_volume_id is not found.
    """
    get_status = 'storage-systems/%s/volumes' % params['ssid']
    url = params['api_url'] + get_status

    (response_code, response_data) = request(url, ignore_errors=True, method='GET',
                                             url_username=params['api_username'], url_password=params['api_password'],
                                             headers=HEADERS,
                                             validate_certs=params['validate_certs'])

    if response_code != 200:
        raise Exception("Response [%s]" % response_code)

    source_capacity = None
    candidates = []
    for volume in response_data:
        if volume['id'] == params['search_volume_id']:
            source_capacity = volume['capacity']
        else:
            candidates.append(volume)

    # BUG FIX: the original proceeded with source_capacity == None when the search
    # volume was absent, and 'capacity > None' raises TypeError on Python 3.
    if source_capacity is None:
        raise Exception("Search volume [%s] was not found." % params['search_volume_id'])

    potential_sources = []
    potential_targets = []

    for volume in candidates:
        # Only volumes not already in a copy pair qualify (exact-False checks
        # preserved from the original).
        if volume['volumeCopyTarget'] is False and volume['volumeCopySource'] is False:
            # NOTE(review): 'capacity' is compared as returned by the API -- if the
            # REST layer returns it as a string this is a lexicographic compare; confirm.
            if volume['capacity'] > source_capacity:
                potential_targets.append(volume['id'])
            else:
                potential_sources.append(volume['id'])

    return potential_targets, potential_sources
+
+
def main():
    """Entry point: search candidates, start/stop a copy, or ensure a copy pair
    exists/is absent on a NetApp E-Series array, then exit via AnsibleModule.

    Every branch below terminates the process through module.exit_json or
    module.fail_json, so control never falls through between the major sections.
    """
    module = AnsibleModule(argument_spec=dict(
        source_volume_id=dict(type='str'),
        destination_volume_id=dict(type='str'),
        copy_priority=dict(required=False, default=0, type='int'),
        ssid=dict(type='str', default='1'),
        api_url=dict(required=True),
        api_username=dict(required=False),
        api_password=dict(required=False, no_log=True),
        validate_certs=dict(required=False, default=True, type='bool'),
        targetWriteProtected=dict(required=False, default=True, type='bool'),
        onlineCopy=dict(required=False, default=False, type='bool'),
        volume_copy_pair_id=dict(type='str'),
        state=dict(required=True, choices=['present', 'absent'], type='str'),
        create_copy_pair_if_does_not_exist=dict(required=False, default=True, type='bool'),
        start_stop_copy=dict(required=False, choices=['start', 'stop'], type='str'),
        search_volume_id=dict(type='str'),
    ),
        mutually_exclusive=[['volume_copy_pair_id', 'destination_volume_id'],
                            ['volume_copy_pair_id', 'source_volume_id'],
                            ['volume_copy_pair_id', 'search_volume_id'],
                            ['search_volume_id', 'destination_volume_id'],
                            ['search_volume_id', 'source_volume_id'],
                            ],
        required_together=[['source_volume_id', 'destination_volume_id'],
                           ],
        required_if=[["create_copy_pair_if_does_not_exist", True, ['source_volume_id', 'destination_volume_id'], ],
                     ["start_stop_copy", 'stop', ['volume_copy_pair_id'], ],
                     ["start_stop_copy", 'start', ['volume_copy_pair_id'], ],
                     ]

    )
    params = module.params

    # All endpoint paths below are joined with a simple '+', so the base URL
    # must end with a slash.
    if not params['api_url'].endswith('/'):
        params['api_url'] += '/'

    # --- Search mode: report candidate source/target volumes and exit. ---
    if params['search_volume_id'] is not None:
        try:
            potential_targets, potential_sources = find_valid_copy_pair_targets_and_sources(params)
        except Exception as e:
            module.fail_json(msg="Failed to find valid copy pair candidates. Error [%s]" % to_native(e))

        module.exit_json(changed=False,
                         msg=' Valid source devices found: %s Valid target devices found: %s' % (len(potential_sources), len(potential_targets)),
                         search_volume_id=params['search_volume_id'],
                         valid_targets=potential_targets,
                         valid_sources=potential_sources)

    # --- Start/stop mode: control an existing copy job and exit. ---
    if params['start_stop_copy'] == 'start' or params['start_stop_copy'] == 'stop':

        # Get the current status info.
        # NOTE(review): local name 'currenty_running' is a typo for
        # 'currently_running' (rename only, no behavior impact).
        currenty_running, status_info = check_copy_status(params)

        # If we want to start
        if params['start_stop_copy'] == 'start':

            # Already running: idempotent no-op.
            if currenty_running is True:
                module.exit_json(changed=False, msg='Volume Copy Pair copy has started.',
                                 volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=status_info)
            # If we need to start
            else:

                start_status, info = start_stop_copy(params)

                if start_status is True:
                    module.exit_json(changed=True, msg='Volume Copy Pair copy has started.',
                                     volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=info)
                else:
                    module.fail_json(msg="Could not start volume copy pair Error: %s" % info)

        # If we want to stop
        else:
            # Already stopped: idempotent no-op.
            if currenty_running is False:
                module.exit_json(changed=False, msg='Volume Copy Pair copy is stopped.',
                                 volume_copy_pair_id=params['volume_copy_pair_id'])

            # If we need to stop it
            else:
                start_status, info = start_stop_copy(params)

                if start_status is True:
                    module.exit_json(changed=True, msg='Volume Copy Pair copy has been stopped.',
                                     volume_copy_pair_id=params['volume_copy_pair_id'])
                else:
                    module.fail_json(msg="Could not stop volume copy pair Error: %s" % info)

    # --- state=present: ensure the copy pair exists. ---
    if params['state'] == 'present':

        # Resolve the pair id from source/destination when not given explicitly.
        if params['volume_copy_pair_id'] is None:
            params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
                params)

        # If no volume copy pair is found we need need to make it.
        if params['volume_copy_pair_id'] is None:

            # In order to create we can not do so with just a volume_copy_pair_id

            copy_began_status, (rc, resp) = create_copy_pair(params)

            if copy_began_status is True:
                module.exit_json(changed=True, msg='Created Volume Copy Pair with ID: %s' % resp['id'])
            else:
                module.fail_json(msg="Could not create volume copy pair Code: %s Error: %s" % (rc, resp))

        # If it does exist we do nothing
        else:
            # We verify that it exists
            exist_status, (exist_status_code, exist_status_data) = find_volume_copy_pair_id_by_volume_copy_pair_id(
                params)

            if exist_status:
                module.exit_json(changed=False,
                                 msg=' Volume Copy Pair with ID: %s exists' % params['volume_copy_pair_id'])
            else:
                if exist_status_code == 404:
                    module.fail_json(
                        msg=' Volume Copy Pair with ID: %s does not exist. Can not create without source_volume_id and destination_volume_id' %
                            params['volume_copy_pair_id'])
                else:
                    module.fail_json(msg="Could not find volume copy pair Code: %s Error: %s" % (
                        exist_status_code, exist_status_data))

        # NOTE(review): unreachable -- every branch above has already exited;
        # this placeholder failure can never fire.
        module.fail_json(msg="Done")

    # --- state=absent: delete the copy pair. ---
    else:

        if params['volume_copy_pair_id'] is None:
            params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
                params)

        # We delete it by the volume_copy_pair_id
        delete_status, (delete_status_code, delete_status_data) = delete_copy_pair_by_copy_pair_id(params)

        if delete_status is True:
            module.exit_json(changed=True,
                             msg=' Volume Copy Pair with ID: %s was deleted' % params['volume_copy_pair_id'])
        else:
            if delete_status_code == 404:
                # Already gone: treat as an idempotent success.
                module.exit_json(changed=False,
                                 msg=' Volume Copy Pair with ID: %s does not exist' % params['volume_copy_pair_id'])
            else:
                module.fail_json(msg="Could not delete volume copy pair Code: %s Error: %s" % (
                    delete_status_code, delete_status_data))


if __name__ == '__main__':
    main()