commit 975f66f2eebe9dadba04f275774d4ab83f74cf25 (patch)
Author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:04:41 +0000
Committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:04:41 +0000
Tree:      89bd26a93aaae6a25749145b7e4bca4a1e75b2be /ansible_collections/netapp_eseries/santricity/plugins/modules
Parent:    Initial commit. (diff)
Download:  ansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.tar.xz / ansible-975f66f2eebe9dadba04f275774d4ab83f74cf25.zip

    Adding upstream version 7.7.0+dfsg. (tag: upstream/7.7.0+dfsg)

    Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>

Diffstat (limited to 'ansible_collections/netapp_eseries/santricity/plugins/modules'):
 55 files changed, 23694 insertions(+), 0 deletions(-)
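Every module in this diff wraps the SANtricity Web Services REST API through the shared NetAppESeriesModule base class. As a point of reference for the first module below, the email-alert configuration applied by na_santricity_alerts reduces to two endpoint calls. The sketch below reproduces that interaction with plain Python requests; the endpoint paths and body fields are taken from the module code, while the host, credentials, and addresses are illustrative placeholders:

    # Minimal sketch of the REST calls wrapped by na_santricity_alerts.
    # The host, credentials, SSID, and email values are placeholders.
    import requests

    API = "https://192.168.1.100:8443/devmgr/v2"
    AUTH = ("admin", "adminpass")
    SSID = "1"

    body = {
        "alertingEnabled": True,
        "emailServerAddress": "mail.example.com",
        "emailSenderAddress": "noreply@example.com",
        "sendAdditionalContactInformation": True,
        "additionalContactInformation": "Phone: 1-555-555-5555",
        "recipientEmailAddresses": ["name1@example.com", "name2@example.com"],
    }

    # Apply the configuration, then request a test email; this mirrors
    # update_configuration() and send_test_email() in the module below.
    resp = requests.post("%s/storage-systems/%s/device-alerts" % (API, SSID),
                         json=body, auth=AUTH, verify=False)
    resp.raise_for_status()

    test = requests.post("%s/storage-systems/%s/device-alerts/alert-email-test" % (API, SSID),
                         auth=AUTH, verify=False)
    if test.json().get("response") != "emailSentOK":
        raise RuntimeError("Test email failed: %s" % test.json())
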
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts.py new file mode 100644 index 000000000..2c105b773 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts.py @@ -0,0 +1,253 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: na_santricity_alerts +short_description: NetApp E-Series manage email notification settings +description: + - Certain E-Series systems have the capability to send email notifications on potentially critical events. + - This module will allow the owner of the system to specify email recipients for these messages. +author: Michael Price (@lmprice) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.santricity_doc +options: + state: + description: + - Enable/disable the sending of email-based alerts. + type: str + default: enabled + required: false + choices: + - enabled + - disabled + server: + description: + - A fully qualified domain name, IPv4 address, or IPv6 address of a mail server. + - To use a fully qualified domain name, you must configure a DNS server on both controllers using + M(na_santricity_mgmt_interface). + - Required when I(state=enabled). + type: str + required: false + sender: + description: + - This is the sender that the recipient will see. It doesn't necessarily need to be a valid email account. + - Required when I(state=enabled). + type: str + required: false + contact: + description: + - Allows the owner to specify some free-form contact information to be included in the emails. + - This is typically utilized to provide a contact phone number. + type: str + required: false + recipients: + description: + - The email addresses that will receive the email notifications. + - Required when I(state=enabled). + type: list + required: false + test: + description: + - When a change is detected in the configuration, a test email will be sent. + - This may take a few minutes to process. + - Only applicable if I(state=enabled). + type: bool + default: false +notes: + - Check mode is supported. + - Alertable messages are a subset of messages shown by the Major Event Log (MEL), of the storage-system. Examples + of alertable messages include drive failures, failed controllers, loss of redundancy, and other warning/critical + events. + - This API is currently only supported with the Embedded Web Services API v2.0 and higher. +""" + +EXAMPLES = """ + - name: Enable email-based alerting + na_santricity_alerts: + state: enabled + sender: noreply@example.com + server: mail@example.com + contact: "Phone: 1-555-555-5555" + recipients: + - name1@example.com + - name2@example.com + api_url: "10.1.1.1:8443" + api_username: "admin" + api_password: "myPass" + + - name: Disable alerting + na_santricity_alerts: + state: disabled + api_url: "10.1.1.1:8443" + api_username: "admin" + api_password: "myPass" +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The settings have been updated. 
+""" +import re + +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule +from ansible.module_utils._text import to_native + + +class NetAppESeriesAlerts(NetAppESeriesModule): + def __init__(self): + ansible_options = dict(state=dict(type='str', required=False, default='enabled', choices=['enabled', 'disabled']), + server=dict(type='str', required=False), + sender=dict(type='str', required=False), + contact=dict(type='str', required=False), + recipients=dict(type='list', required=False), + test=dict(type='bool', required=False, default=False)) + + required_if = [['state', 'enabled', ['server', 'sender', 'recipients']]] + super(NetAppESeriesAlerts, self).__init__(ansible_options=ansible_options, + web_services_version="02.00.0000.0000", + required_if=required_if, + supports_check_mode=True) + + args = self.module.params + self.alerts = args['state'] == 'enabled' + self.server = args['server'] + self.sender = args['sender'] + self.contact = args['contact'] + self.recipients = args['recipients'] + self.test = args['test'] + self.check_mode = self.module.check_mode + + # Very basic validation on email addresses: xx@yy.zz + email = re.compile(r"[^@]+@[^@]+\.[^@]+") + + if self.sender and not email.match(self.sender): + self.module.fail_json(msg="The sender (%s) provided is not a valid email address." % self.sender) + + if self.recipients is not None: + for recipient in self.recipients: + if not email.match(recipient): + self.module.fail_json(msg="The recipient (%s) provided is not a valid email address." % recipient) + + if len(self.recipients) < 1: + self.module.fail_json(msg="At least one recipient address must be specified.") + + def get_configuration(self): + """Retrieve the current storage system alert settings.""" + if self.is_proxy(): + if self.is_embedded_available(): + try: + rc, result = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/device-alerts" % self.ssid) + return result + except Exception as err: + self.module.fail_json(msg="Failed to retrieve the alerts configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + else: + self.module.fail_json(msg="Setting SANtricity alerts is only available from SANtricity Web Services Proxy if the storage system has" + " SANtricity Web Services Embedded available. Array [%s]." % self.ssid) + else: + try: + rc, result = self.request("storage-systems/%s/device-alerts" % self.ssid) + return result + except Exception as err: + self.module.fail_json(msg="Failed to retrieve the alerts configuration! Array Id [%s]. Error [%s]." 
+                                      % (self.ssid, to_native(err)))
+
+    def update_configuration(self):
+        """Update the storage system alert settings."""
+        config = self.get_configuration()
+        update = False
+        body = dict()
+
+        if self.alerts:
+            body = dict(alertingEnabled=True)
+            if not config['alertingEnabled']:
+                update = True
+
+            body.update(emailServerAddress=self.server)
+            if config['emailServerAddress'] != self.server:
+                update = True
+
+            body.update(additionalContactInformation=self.contact, sendAdditionalContactInformation=True)
+            if self.contact and (self.contact != config['additionalContactInformation']
+                                 or not config['sendAdditionalContactInformation']):
+                update = True
+
+            body.update(emailSenderAddress=self.sender)
+            if config['emailSenderAddress'] != self.sender:
+                update = True
+
+            self.recipients.sort()
+            if config['recipientEmailAddresses']:
+                config['recipientEmailAddresses'].sort()
+
+            body.update(recipientEmailAddresses=self.recipients)
+            if config['recipientEmailAddresses'] != self.recipients:
+                update = True
+
+        elif config['alertingEnabled']:
+            body = {"alertingEnabled": False, "emailServerAddress": "", "emailSenderAddress": "", "sendAdditionalContactInformation": False,
+                    "additionalContactInformation": "", "recipientEmailAddresses": []}
+            update = True
+
+        if update and not self.check_mode:
+            if self.is_proxy() and self.is_embedded_available():
+                try:
+                    rc, result = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/device-alerts" % self.ssid, method="POST", data=body)
+                except Exception as err:
+                    self.module.fail_json(msg="Failed to update the alert settings! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+            else:
+                try:
+                    rc, result = self.request("storage-systems/%s/device-alerts" % self.ssid, method="POST", data=body)
+                except Exception as err:
+                    self.module.fail_json(msg="Failed to update the alert settings! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+        return update
+
+    def send_test_email(self):
+        """Send a test email to verify that the provided configuration is valid and functional."""
+        if not self.check_mode:
+            if self.is_proxy() and self.is_embedded_available():
+                try:
+                    rc, resp = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/device-alerts/alert-email-test" % self.ssid, method="POST")
+                    if resp['response'] != 'emailSentOK':
+                        self.module.fail_json(msg="The test email failed with status=[%s]! Array Id [%s]." % (resp['response'], self.ssid))
+                except Exception as err:
+                    self.module.fail_json(msg="Failed to send the test email! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+            else:
+                try:
+                    rc, resp = self.request("storage-systems/%s/device-alerts/alert-email-test" % self.ssid, method="POST")
+                    if resp['response'] != 'emailSentOK':
+                        self.module.fail_json(msg="The test email failed with status=[%s]! Array Id [%s]." % (resp['response'], self.ssid))
+                except Exception as err:
+                    self.module.fail_json(msg="Failed to send the test email! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+    def update(self):
+        update = self.update_configuration()
+
+        if self.test and update:
+            self.send_test_email()
+
+        if self.alerts:
+            msg = 'Alerting has been enabled using server=%s, sender=%s.' % (self.server, self.sender)
+        else:
+            msg = 'Alerting has been disabled.'
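+        # Illustrative exit payload (hypothetical values; the shape follows the
+        # exit_json call below):
+        #   {"msg": "Alerting has been enabled using server=mail.example.com, sender=noreply@example.com.", "changed": true}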
+
+        self.module.exit_json(msg=msg, changed=update)
+
+
+def main():
+    alerts = NetAppESeriesAlerts()
+    alerts.update()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts_syslog.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts_syslog.py
new file mode 100644
index 000000000..9a50dea0c
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_alerts_syslog.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_alerts_syslog
+short_description: NetApp E-Series manage syslog servers receiving storage system alerts.
+description:
+    - Manage the list of syslog servers that will receive notifications on potentially critical events.
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.santricity_doc
+options:
+    servers:
+        description:
+            - List of dictionaries where each dictionary contains a syslog server entry.
+        type: list
+        required: False
+        suboptions:
+            address:
+                description:
+                    - Syslog server address can be a fully qualified domain name, IPv4 address, or IPv6 address.
+                required: true
+            port:
+                description:
+                    - UDP port must be a numerical value between 0 and 65535. Typically, the UDP port for syslog is 514.
+                required: false
+                default: 514
+    test:
+        description:
+            - This forces a test syslog message to be sent to the stated syslog server.
+            - Test will only be issued when a change is made.
+        type: bool
+        default: false
+notes:
+    - Check mode is supported.
+    - This API is currently only supported with the Embedded Web Services API v2.12 (bundled with
+      SANtricity OS 11.40.2) and higher.
+"""
+
+EXAMPLES = """
+    - name: Add two syslog server configurations to NetApp E-Series storage array.
+      na_santricity_alerts_syslog:
+        ssid: "1"
+        api_url: "https://192.168.1.100:8443/devmgr/v2"
+        api_username: "admin"
+        api_password: "adminpass"
+        validate_certs: true
+        servers:
+            - address: "192.168.1.100"
+            - address: "192.168.2.100"
+              port: 514
+            - address: "192.168.3.100"
+              port: 1000
+"""
+
+RETURN = """
+msg:
+    description: Success message
+    returned: on success
+    type: str
+    sample: The settings have been updated.
+"""
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesAlertsSyslog(NetAppESeriesModule):
+    def __init__(self):
+        ansible_options = dict(servers=dict(type="list", required=False),
+                               test=dict(type="bool", default=False, required=False))
+
+        super(NetAppESeriesAlertsSyslog, self).__init__(ansible_options=ansible_options,
+                                                        web_services_version="02.00.0000.0000",
+                                                        supports_check_mode=True)
+        args = self.module.params
+        if args["servers"] and len(args["servers"]) > 5:
+            self.module.fail_json(msg="Maximum number of syslog servers is 5! Array Id [%s]."
% self.ssid) + + self.servers = {} + if args["servers"] is not None: + for server in args["servers"]: + port = 514 + if "port" in server: + port = server["port"] + self.servers.update({server["address"]: port}) + + self.test = args["test"] + self.check_mode = self.module.check_mode + + # Check whether request needs to be forwarded on to the controller web services rest api. + self.url_path_prefix = "" + if not self.is_embedded() and self.ssid != "0" and self.ssid.lower() != "proxy": + self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid + + def get_current_configuration(self): + """Retrieve existing alert-syslog configuration.""" + try: + rc, result = self.request(self.url_path_prefix + "storage-systems/%s/device-alerts/alert-syslog" % ("1" if self.url_path_prefix else self.ssid)) + return result + except Exception as error: + self.module.fail_json(msg="Failed to retrieve syslog configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(error))) + + def is_change_required(self): + """Determine whether changes are required.""" + current_config = self.get_current_configuration() + + # When syslog servers should exist, search for them. + if self.servers: + for entry in current_config["syslogReceivers"]: + if entry["serverName"] not in self.servers.keys() or entry["portNumber"] != self.servers[entry["serverName"]]: + return True + + for server, port in self.servers.items(): + for entry in current_config["syslogReceivers"]: + if server == entry["serverName"] and port == entry["portNumber"]: + break + else: + return True + return False + + elif current_config["syslogReceivers"]: + return True + + return False + + def make_request_body(self): + """Generate the request body.""" + body = {"syslogReceivers": [], "defaultFacility": 3, "defaultTag": "StorageArray"} + + for server, port in self.servers.items(): + body["syslogReceivers"].append({"serverName": server, "portNumber": port}) + + return body + + def test_configuration(self): + """Send syslog test message to all systems (only option).""" + try: + rc, result = self.request(self.url_path_prefix + "storage-systems/%s/device-alerts/alert-syslog-test" + % ("1" if self.url_path_prefix else self.ssid), method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to send test message! Array Id [%s]. Error [%s]." % (self.ssid, to_native(error))) + + def update(self): + """Update configuration and respond to ansible.""" + change_required = self.is_change_required() + + if change_required and not self.check_mode: + try: + rc, result = self.request(self.url_path_prefix + "storage-systems/%s/device-alerts/alert-syslog" % ("1" if self.url_path_prefix else self.ssid), + method="POST", data=self.make_request_body()) + except Exception as error: + self.module.fail_json(msg="Failed to add syslog server! Array Id [%s]. Error [%s]." 
% (self.ssid, to_native(error))) + + if self.test and self.servers: + self.test_configuration() + + self.module.exit_json(msg="The syslog settings have been updated.", changed=change_required) + + +def main(): + settings = NetAppESeriesAlertsSyslog() + settings.update() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_asup.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_asup.py new file mode 100644 index 000000000..8d6a33620 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_asup.py @@ -0,0 +1,544 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: na_santricity_asup +short_description: NetApp E-Series manage auto-support settings +description: + - Allow the auto-support settings to be configured for an individual E-Series storage-system +author: + - Michael Price (@lmprice) + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.santricity_doc +options: + state: + description: + - Enable/disable the E-Series auto-support configuration or maintenance mode. + - When this option is enabled, configuration, logs, and other support-related information will be relayed + to NetApp to help better support your system. No personally identifiable information, passwords, etc, will + be collected. + - The maintenance state enables the maintenance window which allows maintenance activities to be performed on the storage array without + generating support cases. + - Maintenance mode cannot be enabled unless ASUP has previously been enabled. + type: str + default: enabled + choices: + - enabled + - disabled + - maintenance_enabled + - maintenance_disabled + active: + description: + - Enable active/proactive monitoring for ASUP. When a problem is detected by our monitoring systems, it's + possible that the bundle did not contain all of the required information at the time of the event. + Enabling this option allows NetApp support personnel to manually request transmission or re-transmission + of support data in order ot resolve the problem. + - Only applicable if I(state=enabled). + default: true + type: bool + start: + description: + - A start hour may be specified in a range from 0 to 23 hours. + - ASUP bundles will be sent daily between the provided start and end time (UTC). + - I(start) must be less than I(end). + type: int + default: 0 + end: + description: + - An end hour may be specified in a range from 1 to 24 hours. + - ASUP bundles will be sent daily between the provided start and end time (UTC). + - I(start) must be less than I(end). + type: int + default: 24 + days: + description: + - A list of days of the week that ASUP bundles will be sent. A larger, weekly bundle will be sent on one + of the provided days. + type: list + choices: + - monday + - tuesday + - wednesday + - thursday + - friday + - saturday + - sunday + required: false + aliases: + - schedule_days + - days_of_week + method: + description: + - AutoSupport dispatch delivery method. + choices: + - https + - http + - email + type: str + required: false + default: https + routing_type: + description: + - AutoSupport routing + - Required when M(method==https or method==http). 
+ choices: + - direct + - proxy + - script + type: str + default: direct + required: false + proxy: + description: + - Information particular to the proxy delivery method. + - Required when M((method==https or method==http) and routing_type==proxy). + type: dict + required: false + suboptions: + host: + description: + - Proxy host IP address or fully qualified domain name. + - Required when M(method==http or method==https) and M(routing_type==proxy). + type: str + required: false + port: + description: + - Proxy host port. + - Required when M(method==http or method==https) and M(routing_type==proxy). + type: int + required: false + script: + description: + - Path to the AutoSupport routing script file. + - Required when M(method==http or method==https) and M(routing_type==script). + type: str + required: false + username: + description: + - Username for the proxy. + type: str + required: false + password: + description: + - Password for the proxy. + type: str + required: false + email: + description: + - Information particular to the e-mail delivery method. + - Uses the SMTP protocol. + - Required when M(method==email). + type: dict + required: false + suboptions: + server: + description: + - Mail server's IP address or fully qualified domain name. + - Required when M(routing_type==email). + type: str + required: false + sender: + description: + - Sender's email account + - Required when M(routing_type==email). + type: str + required: false + test_recipient: + description: + - Test verification email + - Required when M(routing_type==email). + type: str + required: false + maintenance_duration: + description: + - The duration of time the ASUP maintenance mode will be active. + - Permittable range is between 1 and 72 hours. + - Required when I(state==maintenance_enabled). + type: int + default: 24 + required: false + maintenance_emails: + description: + - List of email addresses for maintenance notifications. + - Required when I(state==maintenance_enabled). + type: list + required: false + validate: + description: + - Validate ASUP configuration. + type: bool + default: false + required: false +notes: + - Check mode is supported. + - Enabling ASUP will allow our support teams to monitor the logs of the storage-system in order to proactively + respond to issues with the system. It is recommended that all ASUP-related options be enabled, but they may be + disabled if desired. + - This API is currently only supported with the Embedded Web Services API v2.0 and higher. +""" + +EXAMPLES = """ + - name: Enable ASUP and allow pro-active retrieval of bundles + na_santricity_asup: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + state: enabled + active: true + days: ["saturday", "sunday"] + start: 17 + end: 20 + - name: Set the ASUP schedule to only send bundles from 12 AM CST to 3 AM CST. + na_santricity_asup: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + state: disabled + - name: Set the ASUP schedule to only send bundles from 12 AM CST to 3 AM CST. + na_santricity_asup: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + state: maintenance_enabled + maintenance_duration: 24 + maintenance_emails: + - admin@example.com + - name: Set the ASUP schedule to only send bundles from 12 AM CST to 3 AM CST. 
+ na_santricity_asup: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + state: maintenance_disabled +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The settings have been updated. +asup: + description: + - True if ASUP is enabled. + returned: on success + sample: true + type: bool +active: + description: + - True if the active option has been enabled. + returned: on success + sample: true + type: bool +cfg: + description: + - Provide the full ASUP configuration. + returned: on success + type: complex + contains: + asupEnabled: + description: + - True if ASUP has been enabled. + type: bool + onDemandEnabled: + description: + - True if ASUP active monitoring has been enabled. + type: bool + daysOfWeek: + description: + - The days of the week that ASUP bundles will be sent. + type: list +""" +import time + +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule +from ansible.module_utils._text import to_native + + +class NetAppESeriesAsup(NetAppESeriesModule): + DAYS_OPTIONS = ["sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday"] + + def __init__(self): + + ansible_options = dict( + state=dict(type="str", required=False, default="enabled", choices=["enabled", "disabled", "maintenance_enabled", "maintenance_disabled"]), + active=dict(type="bool", required=False, default=True), + days=dict(type="list", required=False, aliases=["schedule_days", "days_of_week"], choices=self.DAYS_OPTIONS), + start=dict(type="int", required=False, default=0), + end=dict(type="int", required=False, default=24), + method=dict(type="str", required=False, choices=["https", "http", "email"], default="https"), + routing_type=dict(type="str", required=False, choices=["direct", "proxy", "script"], default="direct"), + proxy=dict(type="dict", required=False, options=dict(host=dict(type="str", required=False), + port=dict(type="int", required=False), + script=dict(type="str", required=False), + username=dict(type="str", required=False), + password=dict(type="str", no_log=True, required=False))), + email=dict(type="dict", required=False, options=dict(server=dict(type="str", required=False), + sender=dict(type="str", required=False), + test_recipient=dict(type="str", required=False))), + maintenance_duration=dict(type="int", required=False, default=24), + maintenance_emails=dict(type="list", required=False), + validate=dict(type="bool", require=False, default=False)) + + mutually_exclusive = [["host", "script"], + ["port", "script"]] + + required_if = [["method", "https", ["routing_type"]], + ["method", "http", ["routing_type"]], + ["method", "email", ["email"]], + ["state", "maintenance_enabled", ["maintenance_duration", "maintenance_emails"]]] + + super(NetAppESeriesAsup, self).__init__(ansible_options=ansible_options, + web_services_version="02.00.0000.0000", + mutually_exclusive=mutually_exclusive, + required_if=required_if, + supports_check_mode=True) + + args = self.module.params + self.state = args["state"] + self.active = args["active"] + self.days = args["days"] + self.start = args["start"] + self.end = args["end"] + + self.method = args["method"] + self.routing_type = args["routing_type"] if args["routing_type"] else "none" + self.proxy = args["proxy"] + self.email = args["email"] + self.maintenance_duration = args["maintenance_duration"] + self.maintenance_emails = args["maintenance_emails"] + 
self.validate = args["validate"] + + if self.validate and self.email and "test_recipient" not in self.email.keys(): + self.module.fail_json(msg="test_recipient must be provided for validating email delivery method. Array [%s]" % self.ssid) + + self.check_mode = self.module.check_mode + + if self.start >= self.end: + self.module.fail_json(msg="The value provided for the start time is invalid." + " It must be less than the end time.") + if self.start < 0 or self.start > 23: + self.module.fail_json(msg="The value provided for the start time is invalid. It must be between 0 and 23.") + else: + self.start = self.start * 60 + if self.end < 1 or self.end > 24: + self.module.fail_json(msg="The value provided for the end time is invalid. It must be between 1 and 24.") + else: + self.end = min(self.end * 60, 1439) + + if self.maintenance_duration < 1 or self.maintenance_duration > 72: + self.module.fail_json(msg="The maintenance duration must be equal to or between 1 and 72 hours.") + + if not self.days: + self.days = self.DAYS_OPTIONS + + # Check whether request needs to be forwarded on to the controller web services rest api. + self.url_path_prefix = "" + if not self.is_embedded() and self.ssid != "0" and self.ssid.lower() != "proxy": + self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid + + def get_configuration(self): + try: + rc, result = self.request(self.url_path_prefix + "device-asup") + + if not (result["asupCapable"] and result["onDemandCapable"]): + self.module.fail_json(msg="ASUP is not supported on this device. Array Id [%s]." % self.ssid) + return result + + except Exception as err: + self.module.fail_json(msg="Failed to retrieve ASUP configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + def in_maintenance_mode(self): + """Determine whether storage device is currently in maintenance mode.""" + results = False + try: + rc, key_values = self.request(self.url_path_prefix + "key-values") + + for key_value in key_values: + if key_value["key"] == "ansible_asup_maintenance_email_list": + if not self.maintenance_emails: + self.maintenance_emails = key_value["value"].split(",") + elif key_value["key"] == "ansible_asup_maintenance_stop_time": + if time.time() < float(key_value["value"]): + results = True + + except Exception as error: + self.module.fail_json(msg="Failed to retrieve maintenance windows information! Array [%s]. Error [%s]." 
% (self.ssid, to_native(error))) + + return results + + def update_configuration(self): + config = self.get_configuration() + update = False + body = dict() + + # Build request body + if self.state == "enabled": + body = dict(asupEnabled=True) + if not config["asupEnabled"]: + update = True + + if (config["onDemandEnabled"] and config["remoteDiagsEnabled"]) != self.active: + update = True + body.update(dict(onDemandEnabled=self.active, + remoteDiagsEnabled=self.active)) + self.days.sort() + config["schedule"]["daysOfWeek"].sort() + + body["schedule"] = dict(daysOfWeek=self.days, + dailyMinTime=self.start, + dailyMaxTime=self.end, + weeklyMinTime=self.start, + weeklyMaxTime=self.end) + + if self.days != config["schedule"]["daysOfWeek"]: + update = True + if self.start != config["schedule"]["dailyMinTime"] or self.start != config["schedule"]["weeklyMinTime"]: + update = True + elif self.end != config["schedule"]["dailyMaxTime"] or self.end != config["schedule"]["weeklyMaxTime"]: + update = True + + if self.method in ["https", "http"]: + if self.routing_type == "direct": + body["delivery"] = dict(method=self.method, + routingType="direct") + elif self.routing_type == "proxy": + body["delivery"] = dict(method=self.method, + proxyHost=self.proxy["host"], + proxyPort=self.proxy["port"], + routingType="proxyServer") + if "username" in self.proxy.keys(): + body["delivery"].update({"proxyUserName": self.proxy["username"]}) + if "password" in self.proxy.keys(): + body["delivery"].update({"proxyPassword": self.proxy["password"]}) + + elif self.routing_type == "script": + body["delivery"] = dict(method=self.method, + proxyScript=self.proxy["script"], + routingType="proxyScript") + + else: + body["delivery"] = dict(method="smtp", + mailRelayServer=self.email["server"], + mailSenderAddress=self.email["sender"], + routingType="none") + + # Check whether changes are required. + if config["delivery"]["method"] != body["delivery"]["method"]: + update = True + elif config["delivery"]["method"] in ["https", "http"]: + if config["delivery"]["routingType"] != body["delivery"]["routingType"]: + update = True + elif config["delivery"]["routingType"] == "proxyServer": + if (config["delivery"]["proxyHost"] != body["delivery"]["proxyHost"] or + config["delivery"]["proxyPort"] != body["delivery"]["proxyPort"] or + config["delivery"]["proxyUserName"] != body["delivery"]["proxyUserName"] or + config["delivery"]["proxyPassword"] != body["delivery"]["proxyPassword"]): + update = True + elif config["delivery"]["routingType"] == "proxyScript": + if config["delivery"]["proxyScript"] != body["delivery"]["proxyScript"]: + update = True + elif (config["delivery"]["method"] == "smtp" and + config["delivery"]["mailRelayServer"] != body["delivery"]["mailRelayServer"] and + config["delivery"]["mailSenderAddress"] != body["delivery"]["mailSenderAddress"]): + update = True + + if self.in_maintenance_mode(): + update = True + + elif self.state == "disabled": + if config["asupEnabled"]: # Disable asupEnable is asup is disabled. + body = dict(asupEnabled=False) + update = True + + else: + if not config["asupEnabled"]: + self.module.fail_json(msg="AutoSupport must be enabled before enabling or disabling maintenance mode. Array [%s]." % self.ssid) + + if self.in_maintenance_mode() or self.state == "maintenance_enabled": + update = True + + # Apply required changes. 
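+        # Example body for the maintenance-window request issued below when
+        # state=maintenance_enabled (values are illustrative; the field names
+        # come from the request call that follows):
+        #   {"maintenanceWindowEnabled": true, "duration": 24,
+        #    "emailAddresses": ["admin@example.com"]}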
+        if update and not self.check_mode:
+            if self.state == "maintenance_enabled":
+                try:
+                    rc, response = self.request(self.url_path_prefix + "device-asup/maintenance-window", method="POST",
+                                                data=dict(maintenanceWindowEnabled=True,
+                                                          duration=self.maintenance_duration,
+                                                          emailAddresses=self.maintenance_emails))
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to enable ASUP maintenance window. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+                # Add maintenance information to the key-value store
+                try:
+                    rc, response = self.request(self.url_path_prefix + "key-values/ansible_asup_maintenance_email_list", method="POST",
+                                                data=",".join(self.maintenance_emails))
+                    rc, response = self.request(self.url_path_prefix + "key-values/ansible_asup_maintenance_stop_time", method="POST",
+                                                data=str(time.time() + 60 * 60 * self.maintenance_duration))
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to store maintenance information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+            elif self.state == "maintenance_disabled":
+                try:
+                    rc, response = self.request(self.url_path_prefix + "device-asup/maintenance-window", method="POST",
+                                                data=dict(maintenanceWindowEnabled=False,
+                                                          emailAddresses=self.maintenance_emails))
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to disable ASUP maintenance window. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+                # Remove maintenance information from the key-value store
+                try:
+                    rc, response = self.request(self.url_path_prefix + "key-values/ansible_asup_maintenance_email_list", method="DELETE")
+                    rc, response = self.request(self.url_path_prefix + "key-values/ansible_asup_maintenance_stop_time", method="DELETE")
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to remove maintenance information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+            else:
+                if body["asupEnabled"] and self.validate:
+                    validate_body = dict(delivery=body["delivery"])
+                    if self.email:
+                        validate_body["mailReplyAddress"] = self.email["test_recipient"]
+
+                    try:
+                        rc, response = self.request(self.url_path_prefix + "device-asup/verify-config", timeout=600, method="POST", data=validate_body)
+                    except Exception as err:
+                        self.module.fail_json(msg="Failed to validate ASUP configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+                try:
+                    rc, response = self.request(self.url_path_prefix + "device-asup", method="POST", data=body)
+                # This is going to catch cases like a connection failure
+                except Exception as err:
+                    self.module.fail_json(msg="Failed to change ASUP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err))) + + return update + + def apply(self): + update = self.update_configuration() + cfg = self.get_configuration() + + if update: + self.module.exit_json(msg="The ASUP settings have been updated.", changed=update, asup=cfg["asupEnabled"], active=cfg["onDemandEnabled"], cfg=cfg) + else: + self.module.exit_json(msg="No ASUP changes required.", changed=update, asup=cfg["asupEnabled"], active=cfg["onDemandEnabled"], cfg=cfg) + + +def main(): + asup = NetAppESeriesAsup() + asup.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auditlog.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auditlog.py new file mode 100644 index 000000000..03a533fe2 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auditlog.py @@ -0,0 +1,200 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: na_santricity_auditlog +short_description: NetApp E-Series manage audit-log configuration +description: + - This module allows an e-series storage system owner to set audit-log configuration parameters. +author: Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.santricity_doc +options: + max_records: + description: + - The maximum number log messages audit-log will retain. + - Max records must be between and including 100 and 50000. + type: int + default: 50000 + log_level: + description: Filters the log messages according to the specified log level selection. + choices: + - all + - writeOnly + type: str + default: writeOnly + full_policy: + description: Specifies what audit-log should do once the number of entries approach the record limit. + choices: + - overWrite + - preventSystemAccess + type: str + default: overWrite + threshold: + description: + - This is the memory full percent threshold that audit-log will start issuing warning messages. + - Percent range must be between and including 60 and 90. + type: int + default: 90 + force: + description: + - Forces the audit-log configuration to delete log history when log messages fullness cause immediate + warning or full condition. + - Warning! This will cause any existing audit-log messages to be deleted. + - This is only applicable for I(full_policy=preventSystemAccess). + type: bool + default: no +notes: + - Check mode is supported. + - Use I(ssid=="0") or I(ssid=="proxy") to configure SANtricity Web Services Proxy auditlog settings otherwise. +""" + +EXAMPLES = """ +- name: Define audit-log to prevent system access if records exceed 50000 with warnings occurring at 60% capacity. + na_santricity_auditlog: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + max_records: 50000 + log_level: all + full_policy: preventSystemAccess + threshold: 60 +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The settings have been updated. 
+""" +import json + +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule +from ansible.module_utils._text import to_native + + +class NetAppESeriesAuditLog(NetAppESeriesModule): + """Audit-log module configuration class.""" + MAX_RECORDS = 50000 + + def __init__(self): + ansible_options = dict(max_records=dict(type="int", default=50000), + log_level=dict(type="str", default="writeOnly", choices=["all", "writeOnly"]), + full_policy=dict(type="str", default="overWrite", choices=["overWrite", "preventSystemAccess"]), + threshold=dict(type="int", default=90), + force=dict(type="bool", default=False)) + super(NetAppESeriesAuditLog, self).__init__(ansible_options=ansible_options, + web_services_version="02.00.0000.0000", + supports_check_mode=True) + + args = self.module.params + self.log_level = args["log_level"] + self.force = args["force"] + self.full_policy = args["full_policy"] + self.max_records = args["max_records"] + self.threshold = args["threshold"] + + if self.max_records < 100 or self.max_records > self.MAX_RECORDS: + self.module.fail_json(msg="Audit-log max_records count must be between 100 and 50000: [%s]" % self.max_records) + + if self.threshold < 60 or self.threshold > 90: + self.module.fail_json(msg="Audit-log percent threshold must be between 60 and 90: [%s]" % self.threshold) + + # Append web services proxy forward end point. + self.url_path_prefix = "" + if not self.is_embedded() and self.ssid != "0" and self.ssid.lower() != "proxy": + self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid + + def get_configuration(self): + """Retrieve the existing audit-log configurations. + + :returns: dictionary containing current audit-log configuration + """ + try: + if self.is_proxy() and (self.ssid == "0" or self.ssid.lower() != "proxy"): + rc, data = self.request("audit-log/config") + else: + rc, data = self.request(self.url_path_prefix + "storage-systems/1/audit-log/config") + return data + except Exception as err: + self.module.fail_json(msg="Failed to retrieve the audit-log configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + def build_configuration(self): + """Build audit-log expected configuration. + + :returns: Tuple containing update boolean value and dictionary of audit-log configuration + """ + config = self.get_configuration() + + current = dict(auditLogMaxRecords=config["auditLogMaxRecords"], + auditLogLevel=config["auditLogLevel"], + auditLogFullPolicy=config["auditLogFullPolicy"], + auditLogWarningThresholdPct=config["auditLogWarningThresholdPct"]) + + body = dict(auditLogMaxRecords=self.max_records, + auditLogLevel=self.log_level, + auditLogFullPolicy=self.full_policy, + auditLogWarningThresholdPct=self.threshold) + + update = current != body + return update, body + + def delete_log_messages(self): + """Delete all audit-log messages.""" + try: + if self.is_proxy() and (self.ssid == "0" or self.ssid.lower() != "proxy"): + rc, result = self.request("audit-log?clearAll=True", method="DELETE") + else: + rc, result = self.request(self.url_path_prefix + "storage-systems/1/audit-log?clearAll=True", method="DELETE") + except Exception as err: + self.module.fail_json(msg="Failed to delete audit-log messages! Array Id [%s]. Error [%s]." 
% (self.ssid, to_native(err))) + + def update_configuration(self, update=None, body=None, attempt_recovery=True): + """Update audit-log configuration.""" + if update is None or body is None: + update, body = self.build_configuration() + + if update and not self.module.check_mode: + try: + if self.is_proxy() and (self.ssid == "0" or self.ssid.lower() != "proxy"): + rc, result = self.request("audit-log/config", data=json.dumps(body), method='POST', ignore_errors=True) + else: + rc, result = self.request(self.url_path_prefix + "storage-systems/1/audit-log/config", + data=json.dumps(body), method='POST', ignore_errors=True) + + if rc == 422: + if self.force and attempt_recovery: + self.delete_log_messages() + update = self.update_configuration(update, body, False) + else: + self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(rc, result))) + + except Exception as error: + self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(error))) + return update + + def update(self): + """Update the audit-log configuration.""" + update = self.update_configuration() + if update: + self.module.exit_json(msg="Audit-log update complete", changed=update) + else: + self.module.exit_json(msg="No audit-log changes required", changed=update) + + +def main(): + auditlog = NetAppESeriesAuditLog() + auditlog.update() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auth.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auth.py new file mode 100644 index 000000000..62e6d1da6 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_auth.py @@ -0,0 +1,351 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: na_santricity_auth +short_description: NetApp E-Series set or update the password for a storage array device or SANtricity Web Services Proxy. +description: + - Sets or updates the password for a storage array device or SANtricity Web Services Proxy. +author: + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.santricity_doc +options: + current_admin_password: + description: + - The current admin password. + - When making changes to the embedded web services's login passwords, api_password will be used and current_admin_password will be ignored. + - When making changes to the proxy web services's login passwords, api_password will be used and current_admin_password will be ignored. + - Only required when the password has been set and will be ignored if not set. + type: str + required: false + password: + description: + - The password you would like to set. + - Cannot be more than 30 characters. + type: str + required: false + user: + description: + - The local user account password to update + - For systems prior to E2800, use admin to change the rw (system password). + - For systems prior to E2800, all choices except admin will be ignored. + type: str + choices: ["admin", "monitor", "support", "security", "storage"] + default: "admin" + required: false + minimum_password_length: + description: + - This option defines the minimum password length. 
+ type: int + required: false +notes: + - Set I(ssid=="0") or I(ssid=="proxy") when attempting to change the password for SANtricity Web Services Proxy. + - SANtricity Web Services Proxy storage password will be updated when changing the password on a managed storage system from the proxy; This is only true + when the storage system has been previously contacted. +""" + +EXAMPLES = """ +- name: Set the initial password + na_santricity_auth: + ssid: 1 + api_url: https://192.168.1.100:8443/devmgr/v2 + api_username: admin + api_password: adminpass + validate_certs: true + current_admin_password: currentadminpass + password: newpassword123 + user: admin +""" + +RETURN = """ +msg: + description: Success message + returned: success + type: str + sample: "Password Updated Successfully" +""" +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule +from ansible.module_utils._text import to_native +from time import sleep + + +class NetAppESeriesAuth(NetAppESeriesModule): + def __init__(self): + version = "02.00.0000.0000" + ansible_options = dict(current_admin_password=dict(type="str", required=False, no_log=True), + password=dict(type="str", required=False, no_log=True), + user=dict(type="str", choices=["admin", "monitor", "support", "security", "storage"], default="admin", required=False), + minimum_password_length=dict(type="int", required=False, no_log=True)) + + super(NetAppESeriesAuth, self).__init__(ansible_options=ansible_options, web_services_version=version, supports_check_mode=True) + args = self.module.params + self.current_admin_password = args["current_admin_password"] + self.password = args["password"] + self.user = args["user"] + self.minimum_password_length = args["minimum_password_length"] + + self.DEFAULT_HEADERS.update({"x-netapp-password-validate-method": "none"}) + + self.is_admin_password_set = None + self.current_password_length_requirement = None + + def minimum_password_length_change_required(self): + """Retrieve the current storage array's global configuration.""" + change_required = False + try: + if self.is_proxy(): + if self.ssid == "0" or self.ssid.lower() == "proxy": + rc, system_info = self.request("local-users/info", force_basic_auth=False) + + elif self.is_embedded_available(): + rc, system_info = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/local-users/info" % self.ssid, + force_basic_auth=False) + else: + return False # legacy systems without embedded web services. + else: + rc, system_info = self.request("storage-systems/%s/local-users/info" % self.ssid, force_basic_auth=False) + except Exception as error: + self.module.fail_json(msg="Failed to determine minimum password length. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + self.is_admin_password_set = system_info["adminPasswordSet"] + if self.minimum_password_length is not None and self.minimum_password_length != system_info["minimumPasswordLength"]: + change_required = True + + if (self.password is not None and ((change_required and self.minimum_password_length > len(self.password)) or + (not change_required and system_info["minimumPasswordLength"] > len(self.password)))): + self.module.fail_json(msg="Password does not meet the length requirement [%s]. Array Id [%s]." 
% (system_info["minimumPasswordLength"], self.ssid)) + + return change_required + + def update_minimum_password_length(self): + """Update automatic load balancing state.""" + try: + if self.is_proxy(): + if self.ssid == "0" or self.ssid.lower() == "proxy": + try: + if not self.is_admin_password_set: + self.creds["url_password"] = "admin" + rc, minimum_password_length = self.request("local-users/password-length", method="POST", + data={"minimumPasswordLength": self.minimum_password_length}) + except Exception as error: + if not self.is_admin_password_set: + self.creds["url_password"] = "" + rc, minimum_password_length = self.request("local-users/password-length", method="POST", + data={"minimumPasswordLength": self.minimum_password_length}) + elif self.is_embedded_available(): + if not self.is_admin_password_set: + self.creds["url_password"] = "" + rc, minimum_password_length = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/local-users/password-length" % self.ssid, + method="POST", data={"minimumPasswordLength": self.minimum_password_length}) + else: + if not self.is_admin_password_set: + self.creds["url_password"] = "" + rc, minimum_password_length = self.request("storage-systems/%s/local-users/password-length" % self.ssid, method="POST", + data={"minimumPasswordLength": self.minimum_password_length}) + except Exception as error: + self.module.fail_json(msg="Failed to set minimum password length. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + def logout_system(self): + """Ensure system is logged out. This is required because login test will always succeed if previously logged in.""" + try: + if self.is_proxy(): + if self.ssid == "0" or self.ssid.lower() == "proxy": + rc, system_info = self.request("utils/login", rest_api_path=self.DEFAULT_BASE_PATH, method="DELETE", force_basic_auth=False) + elif self.is_embedded_available(): + rc, system_info = self.request("storage-systems/%s/forward/devmgr/utils/login" % self.ssid, method="DELETE", force_basic_auth=False) + else: + # Nothing to do for legacy systems without embedded web services. + pass + else: + rc, system_info = self.request("utils/login", rest_api_path=self.DEFAULT_BASE_PATH, method="DELETE", force_basic_auth=False) + except Exception as error: + self.module.fail_json(msg="Failed to log out of storage system [%s]. Error [%s]." % (self.ssid, to_native(error))) + + def password_change_required(self): + """Verify whether the current password is expected array password. Works only against embedded systems.""" + if self.password is None: + return False + + change_required = False + system_info = None + try: + if self.is_proxy(): + if self.ssid == "0" or self.ssid.lower() == "proxy": + rc, system_info = self.request("local-users/info", force_basic_auth=False) + elif self.is_embedded_available(): + rc, system_info = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/local-users/info" % self.ssid, + force_basic_auth=False) + else: + rc, response = self.request("storage-systems/%s/passwords" % self.ssid, ignore_errors=True) + system_info = {"minimumPasswordLength": 0, "adminPasswordSet": response["adminPasswordSet"]} + else: + rc, system_info = self.request("storage-systems/%s/local-users/info" % self.ssid, force_basic_auth=False) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve information about storage system [%s]. Error [%s]." 
% (self.ssid, to_native(error))) + + self.is_admin_password_set = system_info["adminPasswordSet"] + + if not self.is_admin_password_set: + if self.user == "admin" and self.password != "": + change_required = True + + # Determine whether user's password needs to be changed + else: + utils_login_used = False + self.logout_system() # This ensures that login test functions correctly. The query onlycheck=true does not work. + + if self.is_proxy(): + if self.ssid == "0" or self.ssid.lower() == "proxy": + utils_login_used = True + rc, response = self.request("utils/login?uid=%s&pwd=%s&xsrf=false&onlycheck=false" % (self.user, self.password), + rest_api_path=self.DEFAULT_BASE_PATH, log_request=False, ignore_errors=True, force_basic_auth=False) + # elif self.is_embedded_available(): + # utils_login_used = True + # rc, response = self.request("storage-systems/%s/forward/devmgr/utils/login?uid=%s&pwd=%s&xsrf=false&onlycheck=false" + # % (self.ssid, self.user, self.password), log_request=False, ignore_errors=True, force_basic_auth=False) + else: + if self.user == "admin": + rc, response = self.request("storage-systems/%s/stored-password/validate" % self.ssid, method="POST", log_request=False, + ignore_errors=True, data={"password": self.password}) + if rc == 200: + change_required = not response["isValidPassword"] + elif rc == 404: # endpoint did not exist, old proxy version + if self.is_web_services_version_met("04.10.0000.0000"): + self.module.fail_json(msg="For platforms before E2800 use SANtricity Web Services Proxy 4.1 or later! Array Id [%s].") + self.module.fail_json(msg="Failed to validate stored password! Array Id [%s].") + else: + self.module.fail_json(msg="Failed to validate stored password! Array Id [%s]." % self.ssid) + else: + self.module.fail_json(msg="Role based login not available! Only storage system password can be set for storage systems prior to E2800." + " Array Id [%s]." % self.ssid) + else: + utils_login_used = True + rc, response = self.request("utils/login?uid=%s&pwd=%s&xsrf=false&onlycheck=false" % (self.user, self.password), + rest_api_path=self.DEFAULT_BASE_PATH, log_request=False, ignore_errors=True, force_basic_auth=False) + + # Check return codes to determine whether a change is required + if utils_login_used: + if rc == 401: + change_required = True + elif rc == 422: + self.module.fail_json(msg="SAML enabled! SAML disables default role based login. Array [%s]" % self.ssid) + + return change_required + + def set_array_admin_password(self): + """Set the array's admin password.""" + if self.is_proxy(): + + # Update proxy's local users + if self.ssid == "0" or self.ssid.lower() == "proxy": + self.creds["url_password"] = "admin" + try: + body = {"currentAdminPassword": "", "updates": {"userName": "admin", "newPassword": self.password}} + rc, proxy = self.request("local-users", method="POST", data=body) + except Exception as error: + self.creds["url_password"] = "" + try: + body = {"currentAdminPassword": "", "updates": {"userName": "admin", "newPassword": self.password}} + rc, proxy = self.request("local-users", method="POST", data=body) + except Exception as error: + self.module.fail_json(msg="Failed to set proxy's admin password. Error [%s]." 
% to_native(error)) + + self.creds["url_password"] = self.password + + # Update password using the password endpoints, this will also update the storaged password + else: + try: + body = {"currentAdminPassword": "", "newPassword": self.password, "adminPassword": True} + rc, storage_system = self.request("storage-systems/%s/passwords" % self.ssid, method="POST", data=body) + except Exception as error: + self.module.fail_json(msg="Failed to set storage system's admin password. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + # Update embedded local users + else: + self.creds["url_password"] = "" + try: + body = {"currentAdminPassword": "", "updates": {"userName": "admin", "newPassword": self.password}} + rc, proxy = self.request("storage-systems/%s/local-users" % self.ssid, method="POST", data=body) + except Exception as error: + self.module.fail_json(msg="Failed to set embedded storage system's admin password. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + self.creds["url_password"] = self.password + + def set_array_password(self): + """Set the array password.""" + if not self.is_admin_password_set: + self.module.fail_json(msg="Admin password not set! Set admin password before changing non-admin user passwords. Array [%s]." % self.ssid) + + if self.is_proxy(): + + # Update proxy's local users + if self.ssid == "0" or self.ssid.lower() == "proxy": + try: + body = {"currentAdminPassword": self.creds["url_password"], "updates": {"userName": self.user, "newPassword": self.password}} + rc, proxy = self.request("local-users", method="POST", data=body) + except Exception as error: + self.module.fail_json(msg="Failed to set proxy password. Error [%s]." % to_native(error)) + + # Update embedded admin password via proxy passwords endpoint to include updating proxy/unified manager + elif self.user == "admin": + try: + body = {"adminPassword": True, "currentAdminPassword": self.current_admin_password, "newPassword": self.password} + rc, proxy = self.request("storage-systems/%s/passwords" % self.ssid, method="POST", data=body) + except Exception as error: + self.module.fail_json(msg="Failed to set embedded user password. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + # Update embedded non-admin passwords via proxy forward endpoint. + elif self.is_embedded_available(): + try: + body = {"currentAdminPassword": self.current_admin_password, "updates": {"userName": self.user, "newPassword": self.password}} + rc, proxy = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/local-users" % self.ssid, method="POST", data=body) + except Exception as error: + self.module.fail_json(msg="Failed to set embedded user password. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + # Update embedded local users + else: + try: + body = {"currentAdminPassword": self.creds["url_password"], "updates": {"userName": self.user, "newPassword": self.password}} + rc, proxy = self.request("storage-systems/%s/local-users" % self.ssid, method="POST", data=body) + except Exception as error: + self.module.fail_json(msg="Failed to set embedded user password. Array [%s]. Error [%s]." 
+    def set_array_password(self):
+        """Set the array password."""
+        if not self.is_admin_password_set:
+            self.module.fail_json(msg="Admin password not set! Set admin password before changing non-admin user passwords. Array [%s]." % self.ssid)
+
+        if self.is_proxy():
+
+            # Update proxy's local users
+            if self.ssid == "0" or self.ssid.lower() == "proxy":
+                try:
+                    body = {"currentAdminPassword": self.creds["url_password"], "updates": {"userName": self.user, "newPassword": self.password}}
+                    rc, proxy = self.request("local-users", method="POST", data=body)
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to set proxy password. Error [%s]." % to_native(error))
+
+            # Update embedded admin password via proxy passwords endpoint to include updating proxy/unified manager
+            elif self.user == "admin":
+                try:
+                    body = {"adminPassword": True, "currentAdminPassword": self.current_admin_password, "newPassword": self.password}
+                    rc, proxy = self.request("storage-systems/%s/passwords" % self.ssid, method="POST", data=body)
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to set embedded user password. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+            # Update embedded non-admin passwords via proxy forward endpoint.
+            elif self.is_embedded_available():
+                try:
+                    body = {"currentAdminPassword": self.current_admin_password, "updates": {"userName": self.user, "newPassword": self.password}}
+                    rc, proxy = self.request("storage-systems/%s/forward/devmgr/v2/storage-systems/1/local-users" % self.ssid, method="POST", data=body)
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to set embedded user password. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+        # Update embedded local users
+        else:
+            try:
+                body = {"currentAdminPassword": self.creds["url_password"], "updates": {"userName": self.user, "newPassword": self.password}}
+                rc, proxy = self.request("storage-systems/%s/local-users" % self.ssid, method="POST", data=body)
+            except Exception as error:
+                self.module.fail_json(msg="Failed to set embedded user password. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+    def apply(self):
+        """Apply any required changes."""
+        password_change_required = self.password_change_required()
+        minimum_password_length_change_required = self.minimum_password_length_change_required()
+        change_required = password_change_required or minimum_password_length_change_required
+
+        if change_required and not self.module.check_mode:
+            if minimum_password_length_change_required:
+                self.update_minimum_password_length()
+
+            if password_change_required:
+                if not self.is_admin_password_set:
+                    self.set_array_admin_password()
+                else:
+                    self.set_array_password()
+
+        if password_change_required and minimum_password_length_change_required:
+            self.module.exit_json(msg="'%s' password and required password length have been changed. Array [%s]."
+                                  % (self.user, self.ssid), changed=change_required)
+        elif password_change_required:
+            self.module.exit_json(msg="'%s' password has been changed. Array [%s]." % (self.user, self.ssid), changed=change_required)
+        elif minimum_password_length_change_required:
+            self.module.exit_json(msg="Required password length has been changed. Array [%s]." % self.ssid, changed=change_required)
+        self.module.exit_json(msg="No changes have been made. Array [%s]." % self.ssid, changed=change_required)
+
+
+def main():
+    auth = NetAppESeriesAuth()
+    auth.apply()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_client_certificate.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_client_certificate.py
new file mode 100644
index 000000000..e7fe8eda7
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_client_certificate.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+module: na_santricity_client_certificate
+short_description: NetApp E-Series manage remote server certificates.
+description: Manage NetApp E-Series storage array's remote server certificates.
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.santricity_doc
+options:
+    certificates:
+        description:
+            - List of certificate files
+            - Each item must include the path to the file
+        type: list
+        required: false
+    remove_unspecified_user_certificates:
+        description:
+            - Whether to remove user-installed client certificates that are not specified in I(certificates).
+        type: bool
+        default: false
+        required: false
+    reload_certificates:
+        description:
+            - Whether to reload certificates when certificates have been added or removed.
+            - Certificates will not be available or removed until the servers have been reloaded.
+        type: bool
+        default: true
+        required: false
+notes:
+    - Set I(ssid="0") or I(ssid="proxy") to specifically reference SANtricity Web Services Proxy.
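+# Hedged aside (not part of the module options): a PEM certificate suitable for I(certificates) can be produced
+# with standard OpenSSL, for example
+#   openssl req -x509 -newkey rsa:2048 -keyout client.key -out client.crt -days 365 -nodes -subj "/CN=client"
+# and the module accepts both PEM and DER encoded files.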
+requirements:
+    - cryptography
+"""
+EXAMPLES = """
+- name: Upload certificates
+  na_santricity_client_certificate:
+    ssid: 1
+    api_url: https://192.168.1.100:8443/devmgr/v2
+    api_username: admin
+    api_password: adminpass
+    certificates: ["/path/to/certificates.crt", "/path/to/another_certificate.crt"]
+- name: Remove all certificates
+  na_santricity_client_certificate:
+    ssid: 1
+    api_url: https://192.168.1.100:8443/devmgr/v2
+    api_username: admin
+    api_password: adminpass
+"""
+RETURN = """
+changed:
+    description: Whether changes have been made.
+    type: bool
+    returned: always
+    sample: true
+add_certificates:
+    description: Any SSL certificates that were added.
+    type: list
+    returned: always
+    sample: ["added_certificate.crt"]
+removed_certificates:
+    description: Any SSL certificates that were removed.
+    type: list
+    returned: always
+    sample: ["removed_certificate.crt"]
+"""
+
+import binascii
+import os
+import re
+from time import sleep
+
+from datetime import datetime
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata
+from ansible.module_utils._text import to_native
+
+try:
+    from cryptography import x509
+    from cryptography.hazmat.backends import default_backend
+except ImportError:
+    HAS_CRYPTOGRAPHY = False
+else:
+    HAS_CRYPTOGRAPHY = True
+
+
+class NetAppESeriesClientCertificate(NetAppESeriesModule):
+    RELOAD_TIMEOUT_SEC = 3 * 60
+
+    def __init__(self):
+        ansible_options = dict(certificates=dict(type="list", required=False),
+                               remove_unspecified_user_certificates=dict(type="bool", default=False, required=False),
+                               reload_certificates=dict(type="bool", default=True, required=False))
+
+        super(NetAppESeriesClientCertificate, self).__init__(ansible_options=ansible_options,
+                                                             web_services_version="02.00.0000.0000",
+                                                             supports_check_mode=True)
+
+        args = self.module.params
+        self.certificates = args["certificates"] if args["certificates"] else []
+        self.remove_unspecified_user_certificates = args["remove_unspecified_user_certificates"]
+        self.apply_reload_certificates = args["reload_certificates"]
+
+        # Check whether the request needs to be forwarded on to the controller web services rest api.
+        self.url_path_prefix = ""
+        if self.is_proxy() and self.ssid != "0" and self.ssid.lower() != "proxy":
+            self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid
+
+        self.remove_certificates = list()
+        self.add_certificates = list()
+        self.certificate_fingerprint_cache = None
+        self.certificate_info_cache = None
+
+    def certificate_info(self, path):
+        """Determine the pertinent certificate information: alias, subjectDN, issuerDN, start and expire.
+
+        Note: Use only when certificate/remote-server endpoints do not exist. Used to identify certificates through
+        the sslconfig/ca endpoint.
+        """
+        certificate = None
+        with open(path, "rb") as fh:
+            data = fh.read()
+            try:
+                certificate = x509.load_pem_x509_certificate(data, default_backend())
+            except Exception as error:
+                try:
+                    certificate = x509.load_der_x509_certificate(data, default_backend())
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to load certificate. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+        if not isinstance(certificate, x509.Certificate):
+            self.module.fail_json(msg="Failed to open certificate file or invalid certificate object type. Array [%s]."
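+                              # Note on the fallback above: PEM files are base64 text framed by BEGIN/END
+                              # CERTIFICATE markers while DER is the raw binary ASN.1 encoding, so trying
+                              # load_pem_x509_certificate first and falling back to load_der_x509_certificate
+                              # accepts either on-disk format.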
% self.ssid) + + return dict(start_date=certificate.not_valid_before, + expire_date=certificate.not_valid_after, + subject_dn=[attr.value for attr in certificate.subject], + issuer_dn=[attr.value for attr in certificate.issuer]) + + def certificate_fingerprint(self, path): + """Load x509 certificate that is either encoded DER or PEM encoding and return the certificate fingerprint.""" + certificate = None + with open(path, "rb") as fh: + data = fh.read() + try: + certificate = x509.load_pem_x509_certificate(data, default_backend()) + except Exception as error: + try: + certificate = x509.load_der_x509_certificate(data, default_backend()) + except Exception as error: + self.module.fail_json(msg="Failed to determine certificate fingerprint. File [%s]. Array [%s]. Error [%s]." + % (path, self.ssid, to_native(error))) + + return binascii.hexlify(certificate.fingerprint(certificate.signature_hash_algorithm)).decode("utf-8") + + def determine_changes(self): + """Search for remote server certificate that goes by the alias or has a matching fingerprint.""" + rc, current_certificates = self.request(self.url_path_prefix + "certificates/remote-server", ignore_errors=True) + + if rc == 404: # system down or endpoint does not exist + rc, current_certificates = self.request(self.url_path_prefix + "sslconfig/ca?useTruststore=true", ignore_errors=True) + + if rc > 299: + self.module.fail_json(msg="Failed to retrieve remote server certificates. Array [%s]." % self.ssid) + + user_installed_certificates = [certificate for certificate in current_certificates if certificate["isUserInstalled"]] + existing_certificates = [] + + for path in self.certificates: + for current_certificate in user_installed_certificates: + info = self.certificate_info(path) + tmp = dict(subject_dn=[re.sub(r".*=", "", item) for item in current_certificate["subjectDN"].split(", ")], + issuer_dn=[re.sub(r".*=", "", item) for item in current_certificate["issuerDN"].split(", ")], + start_date=datetime.strptime(current_certificate["start"].split(".")[0], "%Y-%m-%dT%H:%M:%S"), + expire_date=datetime.strptime(current_certificate["expire"].split(".")[0], "%Y-%m-%dT%H:%M:%S")) + if (all([attr in info["subject_dn"] for attr in tmp["subject_dn"]]) and + all([attr in info["issuer_dn"] for attr in tmp["issuer_dn"]]) and + tmp["start_date"] == info["start_date"] and + tmp["expire_date"] == info["expire_date"]): + existing_certificates.append(current_certificate) + break + else: + self.add_certificates.append(path) + if self.remove_unspecified_user_certificates: + self.remove_certificates = [certificate for certificate in user_installed_certificates if certificate not in existing_certificates] + + elif rc > 299: + self.module.fail_json(msg="Failed to retrieve remote server certificates. Array [%s]." 
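+                                  # The else branch below (modern firmware) can match on fingerprints directly,
+                                  # whereas this legacy sslconfig/ca path has to compare subject/issuer DNs plus
+                                  # validity dates because the old endpoint does not report a fingerprint.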
% self.ssid) + + else: + user_installed_certificates = [certificate for certificate in current_certificates if certificate["isUserInstalled"]] + existing_certificates = [] + for path in self.certificates: + fingerprint = self.certificate_fingerprint(path) + for current_certificate in user_installed_certificates: + if current_certificate["sha256Fingerprint"] == fingerprint or current_certificate["shaFingerprint"] == fingerprint: + existing_certificates.append(current_certificate) + break + else: + self.add_certificates.append(path) + if self.remove_unspecified_user_certificates: + self.remove_certificates = [certificate for certificate in user_installed_certificates if certificate not in existing_certificates] + + def upload_certificate(self, path): + """Add or update remote server certificate to the storage array.""" + file_name = os.path.basename(path) + headers, data = create_multipart_formdata(files=[("file", file_name, path)]) + + rc, resp = self.request(self.url_path_prefix + "certificates/remote-server", method="POST", headers=headers, data=data, ignore_errors=True) + if rc == 404: + rc, resp = self.request(self.url_path_prefix + "sslconfig/ca?useTruststore=true", method="POST", headers=headers, data=data, ignore_errors=True) + + if rc > 299: + self.module.fail_json(msg="Failed to upload certificate. Array [%s]. Error [%s, %s]." % (self.ssid, rc, resp)) + + def delete_certificate(self, info): + """Delete existing remote server certificate in the storage array truststore.""" + rc, resp = self.request(self.url_path_prefix + "certificates/remote-server/%s" % info["alias"], method="DELETE", ignore_errors=True) + if rc == 404: + rc, resp = self.request(self.url_path_prefix + "sslconfig/ca/%s?useTruststore=true" % info["alias"], method="DELETE", ignore_errors=True) + + if rc > 204: + self.module.fail_json(msg="Failed to delete certificate. Alias [%s]. Array [%s]. Error [%s, %s]." % (info["alias"], self.ssid, rc, resp)) + + def reload_certificates(self): + """Reload certificates on both controllers.""" + rc, resp = self.request(self.url_path_prefix + "certificates/reload?reloadBoth=true", method="POST", ignore_errors=True) + if rc == 404: + rc, resp = self.request(self.url_path_prefix + "sslconfig/reload?reloadBoth=true", method="POST", ignore_errors=True) + + if rc > 202: + self.module.fail_json(msg="Failed to initiate certificate reload on both controllers! Array [%s]." % self.ssid) + + # Wait for controller to be online again. + for retry in range(int(self.RELOAD_TIMEOUT_SEC / 3)): + rc, current_certificates = self.request(self.url_path_prefix + "certificates/remote-server", ignore_errors=True) + + if rc == 404: # system down or endpoint does not exist + rc, current_certificates = self.request(self.url_path_prefix + "sslconfig/ca?useTruststore=true", ignore_errors=True) + + if rc < 300: + break + sleep(3) + else: + self.module.fail_json(msg="Failed to retrieve server certificates. Array [%s]." 
% self.ssid)
+
+    def apply(self):
+        """Apply state changes to the storage array's truststore."""
+        changed = False
+
+        self.determine_changes()
+        if self.remove_certificates or self.add_certificates:
+            changed = True
+
+        if changed and not self.module.check_mode:
+            for info in self.remove_certificates:
+                self.delete_certificate(info)
+
+            for path in self.add_certificates:
+                self.upload_certificate(path)
+
+            if self.apply_reload_certificates:
+                self.reload_certificates()
+
+        self.module.exit_json(changed=changed, removed_certificates=self.remove_certificates, add_certificates=self.add_certificates)
+
+
+def main():
+    client_certs = NetAppESeriesClientCertificate()
+    client_certs.apply()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_discover.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_discover.py
new file mode 100644
index 000000000..c283c3d46
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_discover.py
@@ -0,0 +1,332 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_discover
+short_description: NetApp E-Series discover E-Series storage systems
+description: Module searches a subnet range and returns any available E-Series storage systems.
+author: Nathan Swartz (@ndswartz)
+options:
+    subnet_mask:
+        description:
+            - This is the IPv4 search range for discovering E-Series storage arrays.
+            - IPv4 subnet mask specified in CIDR form. Example 192.168.1.0/24 would search the range 192.168.1.0 to 192.168.1.255.
+            - Be sure to include all management paths in the search range.
+        type: str
+        required: true
+    ports:
+        description:
+            - This option specifies which ports are to be tested during the discovery process.
+            - The first usable port will be used in the returned API url.
+        type: list
+        default: [8443]
+        required: false
+    proxy_url:
+        description:
+            - Web Services Proxy REST API URL. Example https://192.168.1.100:8443/devmgr/v2/
+        type: str
+        required: false
+    proxy_username:
+        description:
+            - Web Services Proxy username
+        type: str
+        required: false
+    proxy_password:
+        description:
+            - Web Services Proxy user password
+        type: str
+        required: false
+    proxy_validate_certs:
+        description:
+            - Whether to validate the Web Services Proxy SSL certificate
+        type: bool
+        default: true
+        required: false
+    prefer_embedded:
+        description:
+            - Give preference to Web Services Embedded when an option exists for both Web Services Proxy and Embedded.
+            - Web Services Proxy will be utilized when available by default.
+        type: bool
+        default: false
+        required: false
+notes:
+    - Only available for platforms E2800 or later (SANtricity Web Services Embedded REST API must be available).
+    - All E-Series storage systems with SANtricity version 11.62 or later will be discovered.
+    - Only E-Series storage systems without a set admin password running SANtricity versions prior to 11.62 will be discovered.
+    - Use SANtricity Web Services Proxy to discover all systems regardless of SANtricity version or password.
+requirements:
+    - ipaddress
+"""
+
+EXAMPLES = """
+- name: Discover all E-Series storage systems on the network.
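+# (A proxy-based discovery, sketched here only as a comment, would additionally set the proxy_url,
+# proxy_username and proxy_password options described above so that arrays with set passwords or older
+# SANtricity versions are also found.)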
+ na_santricity_discover: + subnet_mask: 192.168.1.0/24 +""" + +RETURN = """ +systems_found: + description: Success message + returned: on success + type: dict + sample: '{"012341234123": { + "addresses": ["192.168.1.184", "192.168.1.185"], + "api_urls": ["https://192.168.1.184:8443/devmgr/v2/", "https://192.168.1.185:8443/devmgr/v2/"], + "label": "ExampleArray01", + "proxy_ssid: "", + "proxy_required": false}, + "012341234567": { + "addresses": ["192.168.1.23", "192.168.1.24"], + "api_urls": ["https://192.168.1.100:8443/devmgr/v2/"], + "label": "ExampleArray02", + "proxy_ssid": "array_ssid", + "proxy_required": true}}' +""" + +import json +import multiprocessing +import threading +from time import sleep + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import request +from ansible.module_utils._text import to_native + +try: + import ipaddress +except ImportError: + HAS_IPADDRESS = False +else: + HAS_IPADDRESS = True + +try: + import urlparse +except ImportError: + import urllib.parse as urlparse + + +class NetAppESeriesDiscover: + """Discover E-Series storage systems.""" + MAX_THREAD_POOL_SIZE = 256 + CPU_THREAD_MULTIPLE = 32 + SEARCH_TIMEOUT = 30 + DEFAULT_CONNECTION_TIMEOUT_SEC = 30 + DEFAULT_DISCOVERY_TIMEOUT_SEC = 300 + + def __init__(self): + ansible_options = dict(subnet_mask=dict(type="str", required=True), + ports=dict(type="list", required=False, default=[8443]), + proxy_url=dict(type="str", required=False), + proxy_username=dict(type="str", required=False), + proxy_password=dict(type="str", required=False, no_log=True), + proxy_validate_certs=dict(type="bool", default=True, required=False), + prefer_embedded=dict(type="bool", default=False, required=False)) + + required_together = [["proxy_url", "proxy_username", "proxy_password"]] + self.module = AnsibleModule(argument_spec=ansible_options, required_together=required_together) + args = self.module.params + + self.subnet_mask = args["subnet_mask"] + self.prefer_embedded = args["prefer_embedded"] + self.ports = [] + self.proxy_url = args["proxy_url"] + if args["proxy_url"]: + parsed_url = list(urlparse.urlparse(args["proxy_url"])) + parsed_url[2] = "/devmgr/utils/about" + self.proxy_about_url = urlparse.urlunparse(parsed_url) + parsed_url[2] = "/devmgr/v2/" + self.proxy_url = urlparse.urlunparse(parsed_url) + self.proxy_username = args["proxy_username"] + self.proxy_password = args["proxy_password"] + self.proxy_validate_certs = args["proxy_validate_certs"] + + for port in args["ports"]: + if str(port).isdigit() and 0 < port < 2 ** 16: + self.ports.append(str(port)) + else: + self.module.fail_json(msg="Invalid port! 
Ports must be positive numbers between 0 and 65536.") + + self.systems_found = {} + + def check_ip_address(self, systems_found, address): + """Determine where an E-Series storage system is available at a specific ip address.""" + for port in self.ports: + if port == "8080": + url = "http://%s:%s/" % (address, port) + else: + url = "https://%s:%s/" % (address, port) + + try: + rc, about = request(url + "devmgr/v2/storage-systems/1/about", validate_certs=False, force_basic_auth=False, ignore_errors=True) + if about["serialNumber"] in systems_found: + systems_found[about["serialNumber"]]["api_urls"].append(url) + else: + systems_found.update({about["serialNumber"]: {"api_urls": [url], "label": about["name"], + "addresses": [], "proxy_ssid": "", "proxy_required": False}}) + break + except Exception as error: + try: + rc, sa_data = request(url + "devmgr/v2/storage-systems/1/symbol/getSAData", validate_certs=False, force_basic_auth=False, + ignore_errors=True) + if rc == 401: # Unauthorized + self.module.warn( + "Fail over and discover any storage system without a set admin password. This will discover systems without a set password" + " such as newly deployed storage systems. Address [%s]." % address) + # Fail over and discover any storage system without a set admin password. This will cover newly deployed systems. + rc, graph = request(url + "graph", validate_certs=False, url_username="admin", url_password="", timeout=self.SEARCH_TIMEOUT) + sa_data = graph["sa"]["saData"] + + if sa_data["chassisSerialNumber"] in systems_found: + systems_found[sa_data["chassisSerialNumber"]]["api_urls"].append(url) + else: + systems_found.update({sa_data["chassisSerialNumber"]: {"api_urls": [url], "label": sa_data["storageArrayLabel"], + "addresses": [], "proxy_ssid": "", "proxy_required": False}}) + break + except Exception as error: + pass + + def no_proxy_discover(self): + """Discover E-Series storage systems using embedded web services.""" + thread_pool_size = min(multiprocessing.cpu_count() * self.CPU_THREAD_MULTIPLE, self.MAX_THREAD_POOL_SIZE) + subnet = list(ipaddress.ip_network(u"%s" % self.subnet_mask)) + + thread_pool = [] + search_count = len(subnet) + for start in range(0, search_count, thread_pool_size): + end = search_count if (search_count - start) < thread_pool_size else start + thread_pool_size + + for address in subnet[start:end]: + thread = threading.Thread(target=self.check_ip_address, args=(self.systems_found, address)) + thread_pool.append(thread) + thread.start() + for thread in thread_pool: + thread.join() + + def verify_proxy_service(self): + """Verify proxy url points to a web services proxy.""" + try: + rc, about = request(self.proxy_about_url, validate_certs=self.proxy_validate_certs) + if not about["runningAsProxy"]: + self.module.fail_json(msg="Web Services is not running as a proxy!") + except Exception as error: + self.module.fail_json(msg="Proxy is not available! Check proxy_url. Error [%s]." 
% to_native(error)) + + def test_systems_found(self, systems_found, serial, label, addresses): + """Verify and build api urls.""" + api_urls = [] + for address in addresses: + for port in self.ports: + if port == "8080": + url = "http://%s:%s/devmgr/" % (address, port) + else: + url = "https://%s:%s/devmgr/" % (address, port) + + try: + rc, response = request(url + "utils/about", validate_certs=False, timeout=self.SEARCH_TIMEOUT) + api_urls.append(url + "v2/") + break + except Exception as error: + pass + systems_found.update({serial: {"api_urls": api_urls, + "label": label, + "addresses": addresses, + "proxy_ssid": "", + "proxy_required": False}}) + + def proxy_discover(self): + """Search for array using it's chassis serial from web services proxy.""" + self.verify_proxy_service() + subnet = ipaddress.ip_network(u"%s" % self.subnet_mask) + + try: + rc, request_id = request(self.proxy_url + "discovery", method="POST", validate_certs=self.proxy_validate_certs, + force_basic_auth=True, url_username=self.proxy_username, url_password=self.proxy_password, + data=json.dumps({"startIP": str(subnet[0]), "endIP": str(subnet[-1]), + "connectionTimeout": self.DEFAULT_CONNECTION_TIMEOUT_SEC})) + + # Wait for discover to complete + try: + for iteration in range(self.DEFAULT_DISCOVERY_TIMEOUT_SEC): + rc, discovered_systems = request(self.proxy_url + "discovery?requestId=%s" % request_id["requestId"], + validate_certs=self.proxy_validate_certs, + force_basic_auth=True, url_username=self.proxy_username, url_password=self.proxy_password) + if not discovered_systems["discoverProcessRunning"]: + thread_pool = [] + for discovered_system in discovered_systems["storageSystems"]: + addresses = [] + for controller in discovered_system["controllers"]: + addresses.extend(controller["ipAddresses"]) + + # Storage systems with embedded web services. + if "https" in discovered_system["supportedManagementPorts"] and self.prefer_embedded: + + thread = threading.Thread(target=self.test_systems_found, + args=(self.systems_found, discovered_system["serialNumber"], discovered_system["label"], addresses)) + thread_pool.append(thread) + thread.start() + + # Storage systems without embedded web services. + else: + self.systems_found.update({discovered_system["serialNumber"]: {"api_urls": [self.proxy_url], + "label": discovered_system["label"], + "addresses": addresses, + "proxy_ssid": "", + "proxy_required": True}}) + for thread in thread_pool: + thread.join() + break + sleep(1) + else: + self.module.fail_json(msg="Timeout waiting for array discovery process. Subnet [%s]" % self.subnet_mask) + except Exception as error: + self.module.fail_json(msg="Failed to get the discovery results. Error [%s]." % to_native(error)) + except Exception as error: + self.module.fail_json(msg="Failed to initiate array discovery. Error [%s]." % to_native(error)) + + def update_proxy_with_proxy_ssid(self): + """Determine the current proxy ssid for all discovered-proxy_required storage systems.""" + # Discover all added storage systems to the proxy. 
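+        # Hedged sketch of the join performed below: GET <proxy>/storage-systems returns entries that include
+        # both an "id" (the proxy's ssid for the array) and a "chassisSerialNumber"; the chassis serial is the
+        # key used to copy that id into systems_found[...]["proxy_ssid"] for systems that require the proxy.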
+        systems = []
+        try:
+            rc, systems = self.request(self.proxy_url + "storage-systems", validate_certs=self.proxy_validate_certs,
+                                       force_basic_auth=True, url_username=self.proxy_username, url_password=self.proxy_password)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to ascertain storage systems added to Web Services Proxy. Error [%s]." % to_native(error))
+
+        for system_key, system_info in self.systems_found.items():
+            if self.systems_found[system_key]["proxy_required"]:
+                for system in systems:
+                    if system_key == system["chassisSerialNumber"]:
+                        self.systems_found[system_key]["proxy_ssid"] = system["id"]
+
+    def discover(self):
+        """Discover E-Series storage systems."""
+        missing_packages = []
+        if not HAS_IPADDRESS:
+            missing_packages.append("ipaddress")
+
+        if missing_packages:
+            self.module.fail_json(msg="Python packages are missing! Packages [%s]." % ", ".join(missing_packages))
+
+        if self.proxy_url:
+            self.proxy_discover()
+            self.update_proxy_with_proxy_ssid()
+        else:
+            self.no_proxy_discover()
+
+        self.module.exit_json(msg="Discover process complete.", systems_found=self.systems_found, changed=False)
+
+
+def main():
+    discover = NetAppESeriesDiscover()
+    discover.discover()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_drive_firmware.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_drive_firmware.py
new file mode 100644
index 000000000..612ce2bd6
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_drive_firmware.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_drive_firmware
+short_description: NetApp E-Series manage drive firmware
+description:
+    - Ensure drive firmware version is activated on specified drive model.
+author:
+    - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.santricity_doc
+options:
+    firmware:
+        description:
+            - List of drive firmware file paths.
+            - NetApp E-Series drives require special firmware which can be downloaded from https://mysupport.netapp.com/NOW/download/tools/diskfw_eseries/
+        type: list
+        required: True
+    wait_for_completion:
+        description:
+            - This flag will cause the module to wait for any upgrade actions to complete.
+        type: bool
+        default: false
+    ignore_inaccessible_drives:
+        description:
+            - This flag will determine whether the drive firmware upgrade should fail if any affected drives are inaccessible.
+        type: bool
+        default: false
+    upgrade_drives_online:
+        description:
+            - This flag will determine whether drive firmware can be upgraded while drives are accepting I/O.
+            - When I(upgrade_drives_online=False), stop all I/O before running the task.
+        type: bool
+        default: true
+"""
+EXAMPLES = """
+- name: Ensure correct firmware versions
+  na_santricity_drive_firmware:
+    ssid: "1"
+    api_url: "https://192.168.1.100:8443/devmgr/v2"
+    api_username: "admin"
+    api_password: "adminpass"
+    validate_certs: true
+    firmware: "path/to/drive_firmware"
+    wait_for_completion: true
+    ignore_inaccessible_drives: false
+"""
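+# Hedged summary of the flow implemented below: each file in I(firmware) is uploaded with a multipart POST to
+# "files/drive", compatibility is checked against "storage-systems/<ssid>/firmware/drives", and the upgrade is
+# started via "storage-systems/<ssid>/firmware/drives/initiate-upgrade?onlineUpdate=true|false".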
+RETURN = """
+msg:
+    description: Whether any drive firmware was upgraded and whether it is in progress.
+    type: str
+    returned: always
+    sample:
+        { changed: True, upgrade_in_process: True }
+"""
+import os
+import re
+
+from time import sleep
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata, request
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesDriveFirmware(NetAppESeriesModule):
+    WAIT_TIMEOUT_SEC = 60 * 15
+
+    def __init__(self):
+        ansible_options = dict(
+            firmware=dict(type="list", required=True),
+            wait_for_completion=dict(type="bool", default=False),
+            ignore_inaccessible_drives=dict(type="bool", default=False),
+            upgrade_drives_online=dict(type="bool", default=True))
+
+        super(NetAppESeriesDriveFirmware, self).__init__(ansible_options=ansible_options,
+                                                         web_services_version="02.00.0000.0000",
+                                                         supports_check_mode=True)
+
+        args = self.module.params
+        self.firmware_list = args["firmware"]
+        self.wait_for_completion = args["wait_for_completion"]
+        self.ignore_inaccessible_drives = args["ignore_inaccessible_drives"]
+        self.upgrade_drives_online = args["upgrade_drives_online"]
+
+        self.upgrade_list_cache = None
+
+        self.upgrade_required_cache = None
+        self.upgrade_in_progress = False
+        self.drive_info_cache = None
+
+    def upload_firmware(self):
+        """Ensure the firmware files have been uploaded prior to the upgrade."""
+        for firmware in self.firmware_list:
+            firmware_name = os.path.basename(firmware)
+            files = [("file", firmware_name, firmware)]
+            headers, data = create_multipart_formdata(files)
+            try:
+                rc, response = self.request("/files/drive", method="POST", headers=headers, data=data)
+            except Exception as error:
+                self.module.fail_json(msg="Failed to upload drive firmware [%s]. Array [%s]. Error [%s]." % (firmware_name, self.ssid, to_native(error)))
+
+    def upgrade_list(self):
+        """Determine whether firmware is compatible with the specified drives."""
+        if self.upgrade_list_cache is None:
+            self.upgrade_list_cache = list()
+            try:
+                rc, response = self.request("storage-systems/%s/firmware/drives" % self.ssid)
+
+                # Create the upgrade list; this ensures that only the firmware uploaded is applied
+                for firmware in self.firmware_list:
+                    filename = os.path.basename(firmware)
+
+                    for uploaded_firmware in response["compatibilities"]:
+                        if uploaded_firmware["filename"] == filename:
+
+                            # Determine whether upgrade is required
+                            drive_reference_list = []
+                            for drive in uploaded_firmware["compatibleDrives"]:
+                                try:
+                                    rc, drive_info = self.request("storage-systems/%s/drives/%s" % (self.ssid, drive["driveRef"]))
+
+                                    # Add drive references that are supported and differ from current firmware
+                                    if (drive_info["firmwareVersion"] != uploaded_firmware["firmwareVersion"] and
+                                            uploaded_firmware["firmwareVersion"] in uploaded_firmware["supportedFirmwareVersions"]):
+
+                                        if self.ignore_inaccessible_drives or not drive_info["offline"]:
+                                            drive_reference_list.append(drive["driveRef"])
+
+                                        if not drive["onlineUpgradeCapable"] and self.upgrade_drives_online:
+                                            self.module.fail_json(msg="Drive is not capable of online upgrade. Array [%s]. Drive [%s]."
+                                                                  % (self.ssid, drive["driveRef"]))
+
+                                except Exception as error:
+                                    self.module.fail_json(msg="Failed to retrieve drive information. Array [%s]. Drive [%s]. Error [%s]."
+                                                          % (self.ssid, drive["driveRef"], to_native(error)))
+
+                            if drive_reference_list:
+                                self.upgrade_list_cache.extend([{"filename": filename, "driveRefList": drive_reference_list}])
+
+            except Exception as error:
+                self.module.fail_json(msg="Failed to complete compatibility and health check. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+        return self.upgrade_list_cache
+
Error [%s]." % (self.ssid, to_native(error))) + + return self.upgrade_list_cache + + def wait_for_upgrade_completion(self): + """Wait for drive firmware upgrade to complete.""" + drive_references = [reference for drive in self.upgrade_list() for reference in drive["driveRefList"]] + last_status = None + for attempt in range(int(self.WAIT_TIMEOUT_SEC / 5)): + try: + rc, response = self.request("storage-systems/%s/firmware/drives/state" % self.ssid) + + # Check drive status + for status in response["driveStatus"]: + last_status = status + if status["driveRef"] in drive_references: + if status["status"] == "okay": + continue + elif status["status"] in ["inProgress", "inProgressRecon", "pending", "notAttempted"]: + break + else: + self.module.fail_json(msg="Drive firmware upgrade failed. Array [%s]. Drive [%s]. Status [%s]." + % (self.ssid, status["driveRef"], status["status"])) + else: + self.upgrade_in_progress = False + break + except Exception as error: + self.module.fail_json(msg="Failed to retrieve drive status. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + sleep(5) + else: + self.module.fail_json(msg="Timed out waiting for drive firmware upgrade. Array [%s]. Status [%s]." % (self.ssid, last_status)) + + def upgrade(self): + """Apply firmware to applicable drives.""" + try: + rc, response = self.request("storage-systems/%s/firmware/drives/initiate-upgrade?onlineUpdate=%s" + % (self.ssid, "true" if self.upgrade_drives_online else "false"), method="POST", data=self.upgrade_list()) + self.upgrade_in_progress = True + except Exception as error: + self.module.fail_json(msg="Failed to upgrade drive firmware. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + if self.wait_for_completion: + self.wait_for_upgrade_completion() + + def apply(self): + """Apply firmware policy has been enforced on E-Series storage system.""" + self.upload_firmware() + + if self.upgrade_list() and not self.module.check_mode: + self.upgrade() + + self.module.exit_json(changed=True if self.upgrade_list() else False, + upgrade_in_process=self.upgrade_in_progress) + + +def main(): + drive_firmware = NetAppESeriesDriveFirmware() + drive_firmware.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_facts.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_facts.py new file mode 100644 index 000000000..32906e0d4 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_facts.py @@ -0,0 +1,1185 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +module: na_santricity_facts +short_description: NetApp E-Series retrieve facts about NetApp E-Series storage arrays +description: + - The na_santricity_facts module returns a collection of facts regarding NetApp E-Series storage arrays. 
+author: + - Kevin Hulquest (@hulquest) + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.santricity_doc +''' + +EXAMPLES = """ +--- +- name: Get array facts + na_santricity_facts: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true +""" + +RETURN = """ + msg: + description: Success message + returned: on success + type: str + sample: + - Gathered facts for storage array. Array ID [1]. + - Gathered facts for web services proxy. + storage_array_facts: + description: provides details about the array, controllers, management interfaces, hostside interfaces, + driveside interfaces, disks, storage pools, volumes, snapshots, and features. + returned: on successful inquiry from from embedded web services rest api + type: complex + contains: + netapp_controllers: + description: storage array controller list that contains basic controller identification and status + type: complex + sample: + - [{"name": "A", "serial": "021632007299", "status": "optimal"}, + {"name": "B", "serial": "021632007300", "status": "failed"}] + netapp_disks: + description: drive list that contains identification, type, and status information for each drive + type: complex + sample: + - [{"available": false, + "firmware_version": "MS02", + "id": "01000000500003960C8B67880000000000000000", + "media_type": "ssd", + "product_id": "PX02SMU080 ", + "serial_number": "15R0A08LT2BA", + "status": "optimal", + "tray_ref": "0E00000000000000000000000000000000000000", + "usable_bytes": "799629205504" }] + netapp_driveside_interfaces: + description: drive side interface list that contains identification, type, and speed for each interface + type: complex + sample: + - [{ "controller": "A", "interface_speed": "12g", "interface_type": "sas" }] + - [{ "controller": "B", "interface_speed": "10g", "interface_type": "iscsi" }] + netapp_enabled_features: + description: specifies the enabled features on the storage array. + returned: on success + type: complex + sample: + - [ "flashReadCache", "performanceTier", "protectionInformation", "secureVolume" ] + netapp_host_groups: + description: specifies the host groups on the storage arrays. + returned: on success + type: complex + sample: + - [{ "id": "85000000600A098000A4B28D003610705C40B964", "name": "group1" }] + netapp_hosts: + description: specifies the hosts on the storage arrays. + returned: on success + type: complex + sample: + - [{ "id": "8203800000000000000000000000000000000000", + "name": "host1", + "group_id": "85000000600A098000A4B28D003610705C40B964", + "host_type_index": 28, + "ports": [{ "type": "fc", "address": "1000FF7CFFFFFF01", "label": "FC_1" }, + { "type": "fc", "address": "1000FF7CFFFFFF00", "label": "FC_2" }]}] + netapp_host_types: + description: lists the available host types on the storage array. 
+ returned: on success + type: complex + sample: + - [{ "index": 0, "type": "FactoryDefault" }, + { "index": 1, "type": "W2KNETNCL"}, + { "index": 2, "type": "SOL" }, + { "index": 5, "type": "AVT_4M" }, + { "index": 6, "type": "LNX" }, + { "index": 7, "type": "LnxALUA" }, + { "index": 8, "type": "W2KNETCL" }, + { "index": 9, "type": "AIX MPIO" }, + { "index": 10, "type": "VmwTPGSALUA" }, + { "index": 15, "type": "HPXTPGS" }, + { "index": 17, "type": "SolTPGSALUA" }, + { "index": 18, "type": "SVC" }, + { "index": 22, "type": "MacTPGSALUA" }, + { "index": 23, "type": "WinTPGSALUA" }, + { "index": 24, "type": "LnxTPGSALUA" }, + { "index": 25, "type": "LnxTPGSALUA_PM" }, + { "index": 26, "type": "ONTAP_ALUA" }, + { "index": 27, "type": "LnxTPGSALUA_SF" }, + { "index": 28, "type": "LnxDHALUA" }, + { "index": 29, "type": "ATTOClusterAllOS" }] + netapp_hostside_interfaces: + description: host side interface list that contains identification, configuration, type, speed, and + status information for each interface + type: complex + sample: + - [{"iscsi": + [{ "controller": "A", + "current_interface_speed": "10g", + "ipv4_address": "10.10.10.1", + "ipv4_enabled": true, + "ipv4_gateway": "10.10.10.1", + "ipv4_subnet_mask": "255.255.255.0", + "ipv6_enabled": false, + "iqn": "iqn.1996-03.com.netapp:2806.600a098000a81b6d0000000059d60c76", + "link_status": "up", + "mtu": 9000, + "supported_interface_speeds": [ "10g" ] }]}] + netapp_management_interfaces: + description: management interface list that contains identification, configuration, and status for + each interface + type: complex + sample: + - [{"alias": "ict-2800-A", + "channel": 1, + "controller": "A", + "dns_config_method": "dhcp", + "dns_servers": [], + "ipv4_address": "10.1.1.1", + "ipv4_address_config_method": "static", + "ipv4_enabled": true, + "ipv4_gateway": "10.113.1.1", + "ipv4_subnet_mask": "255.255.255.0", + "ipv6_enabled": false, + "link_status": "up", + "mac_address": "00A098A81B5D", + "name": "wan0", + "ntp_config_method": "disabled", + "ntp_servers": [], + "remote_ssh_access": false }] + netapp_storage_array: + description: provides storage array identification, firmware version, and available capabilities + type: dict + sample: + - {"chassis_serial": "021540006043", + "firmware": "08.40.00.01", + "name": "ict-2800-11_40", + "wwn": "600A098000A81B5D0000000059D60C76", + "cacheBlockSizes": [4096, + 8192, + 16384, + 32768], + "supportedSegSizes": [8192, + 16384, + 32768, + 65536, + 131072, + 262144, + 524288]} + netapp_storage_pools: + description: storage pool list that contains identification and capacity information for each pool + type: complex + sample: + - [{"available_capacity": "3490353782784", + "id": "04000000600A098000A81B5D000002B45A953A61", + "name": "Raid6", + "total_capacity": "5399466745856", + "used_capacity": "1909112963072" }] + netapp_volumes: + description: storage volume list that contains identification and capacity information for each volume + type: complex + sample: + - [{"capacity": "5368709120", + "id": "02000000600A098000AAC0C3000002C45A952BAA", + "is_thin_provisioned": false, + "name": "5G", + "parent_storage_pool_id": "04000000600A098000A81B5D000002B45A953A61" }] + netapp_workload_tags: + description: workload tag list + type: complex + sample: + - [{"id": "87e19568-43fb-4d8d-99ea-2811daaa2b38", + "name": "ftp_server", + "workloadAttributes": [{"key": "use", + "value": "general"}]}] + netapp_volumes_by_initiators: + description: list of available volumes keyed by the mapped initiators. 
+ type: complex + sample: + - {"beegfs_host": [{"id": "02000000600A098000A4B9D1000015FD5C8F7F9E", + "meta_data": {"filetype": "ext4", "public": true}, + "name": "some_volume", + "workload_name": "beegfs_metadata", + "workload_metadata": {"filetype": "ext4", "public": true}, + "volume_metadata": '{"format_type": "ext4", + "format_options": "-i 2048 -I 512 -J size=400 -Odir_index,filetype", + "mount_options": "noatime,nodiratime,nobarrier,_netdev", + "mount_directory": "/data/beegfs/"}', + "host_types": ["nvmeof"], + "eui": "0000139A3885FA4500A0980000EAA272V", + "wwn": "600A098000A4B9D1000015FD5C8F7F9E"}]} + snapshot_images: + description: snapshot image list that contains identification, capacity, and status information for each + snapshot image + type: complex + sample: + - [{"active_cow": true, + "creation_method": "user", + "id": "34000000600A098000A81B5D00630A965B0535AC", + "pit_capacity": "5368709120", + "reposity_cap_utilization": "0", + "rollback_source": false, + "status": "optimal" }] + proxy_facts: + description: proxy storage system list + returned: on successful inquiry from from web services proxy's rest api + type: complex + contains: + ssid: + description: storage system id + type: str + sample: "ec8ed9d2-eba3-4cac-88fb-0954f327f1d4" + name: + description: storage system name + type: str + sample: "EF570-NVMe" + wwn: + description: storage system unique identifier + type: str + sample: "AC1100051E1E1E1E1E1E1E1E1E1E1E1E" + model: + description: NetApp E-Series model number + type: str + sample: "5700" + controller: + description: controller list that contains identification, ip addresses, and certificate information for + each controller + type: complex + sample: [{"certificateStatus": "selfSigned", + "controllerId": "070000000000000000000001", + "ipAddresses": ["172.17.0.5", "3.3.3.3"]}] + drive_types: + description: all available storage system drive types + type: list + sample: ["sas", "fibre"] + unconfigured_space: + description: unconfigured storage system space in bytes + type: str + sample: "982259020595200" + array_status: + description: storage system status + type: str + sample: "optimal" + password_status: + description: storage system password status + type: str + sample: "invalid" + certificate_status: + description: storage system ssl certificate status + type: str + sample: "untrusted" + firmware_version: + description: storage system install firmware version + type: str + sample: "08.50.42.99" + chassis_serial: + description: storage system chassis serial number + type: str + sample: "SX0810032" + asup_enabled: + description: storage system auto-support status + type: bool + sample: True +""" + +from datetime import datetime +import re +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule +try: + from ansible.module_utils.ansible_release import __version__ as ansible_version +except ImportError: + ansible_version = 'unknown' + +try: + from urlparse import urlparse, urlunparse +except ImportError: + from urllib.parse import urlparse, urlunparse + + +class Facts(NetAppESeriesModule): + def __init__(self): + web_services_version = "02.00.0000.0000" + super(Facts, self).__init__(ansible_options={}, + web_services_version=web_services_version, + supports_check_mode=True) + + def get_controllers(self): + """Retrieve a mapping of controller references to their labels.""" + controllers = list() + try: + rc, controllers = self.request('storage-systems/%s/graph/xpath-filter?query=/controller/id' % self.ssid) + 
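+            # The xpath-filter query above returns just the controller id strings from the array graph; they are
+            # sorted below and mapped, in order, to the labels "A", "B", ... used throughout the returned facts.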
except Exception as err: + self.module.fail_json( + msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]." + % (self.ssid, str(err))) + + controllers.sort() + + controllers_dict = {} + i = ord('A') + for controller in controllers: + label = chr(i) + controllers_dict[controller] = label + i += 1 + + return controllers_dict + + def get_array_facts(self): + """Extract particular facts from the storage array graph""" + facts = dict(facts_from_proxy=(not self.is_embedded()), ssid=self.ssid) + controller_reference_label = self.get_controllers() + array_facts = None + hardware_inventory_facts = None + + # Get the storage array graph + try: + rc, array_facts = self.request("storage-systems/%s/graph" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to obtain facts from storage array with id [%s]. Error [%s]" % (self.ssid, str(error))) + + # Get the storage array hardware inventory + try: + rc, hardware_inventory_facts = self.request("storage-systems/%s/hardware-inventory" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to obtain hardware inventory from storage array with id [%s]. Error [%s]" % (self.ssid, str(error))) + + # Get storage system specific key-value pairs + key_value_url = "key-values" + key_values = [] + if not self.is_embedded() and self.ssid.lower() not in ["0", "proxy"]: + key_value_url = "storage-systems/%s/forward/devmgr/v2/key-values" % self.ssid + try: + rc, key_values = self.request(key_value_url) + except Exception as error: + self.module.fail_json(msg="Failed to obtain embedded key-value database. Array [%s]. Error [%s]" % (self.ssid, str(error))) + + facts['netapp_storage_array'] = dict( + name=array_facts['sa']['saData']['storageArrayLabel'], + chassis_serial=array_facts['sa']['saData']['chassisSerialNumber'], + firmware=array_facts['sa']['saData']['fwVersion'], + wwn=array_facts['sa']['saData']['saId']['worldWideName'], + segment_sizes=array_facts['sa']['featureParameters']['supportedSegSizes'], + cache_block_sizes=array_facts['sa']['featureParameters']['cacheBlockSizes']) + + facts['netapp_controllers'] = [ + dict( + name=controller_reference_label[controller['controllerRef']], + serial=controller['serialNumber'].strip(), + status=controller['status'], + ) for controller in array_facts['controller']] + + facts['netapp_hosts'] = [ + dict( + group_id=host['clusterRef'], + hosts_reference=host['hostRef'], + id=host['id'], + name=host['name'], + host_type_index=host['hostTypeIndex'], + ports=host['hostSidePorts'] + ) for host in array_facts['storagePoolBundle']['host']] + + facts['netapp_host_groups'] = [ + dict( + id=group['id'], + name=group['name'], + hosts=[host['name'] for host in facts['netapp_hosts'] if host['group_id'] == group['id']] + ) for group in array_facts['storagePoolBundle']['cluster']] + facts['netapp_host_groups'].append(dict( + id='0000000000000000000000000000000000000000', + name='default_hostgroup', + hosts=[host["name"] for host in facts['netapp_hosts'] if host['group_id'] == '0000000000000000000000000000000000000000'])) + + facts['netapp_host_types'] = [ + dict( + type=host_type['hostType'], + index=host_type['index'] + ) for host_type in array_facts['sa']['hostSpecificVals'] + if 'hostType' in host_type.keys() and host_type['hostType'] + # This conditional ignores zero-length strings which indicates that the associated host-specific NVSRAM region has been cleared. 
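+            # (For example, an entry whose "hostType" value is an empty string is skipped, while
+            # {"hostType": "LnxDHALUA", "index": 28} contributes {"type": "LnxDHALUA", "index": 28}.)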
+ ] + + facts['snapshot_images'] = [ + dict( + id=snapshot['id'], + status=snapshot['status'], + pit_capacity=snapshot['pitCapacity'], + creation_method=snapshot['creationMethod'], + reposity_cap_utilization=snapshot['repositoryCapacityUtilization'], + active_cow=snapshot['activeCOW'], + rollback_source=snapshot['isRollbackSource'] + ) for snapshot in array_facts['highLevelVolBundle']['pit']] + + facts['netapp_disks'] = [ + dict( + id=disk['id'], + available=disk['available'], + media_type=disk['driveMediaType'], + status=disk['status'], + usable_bytes=disk['usableCapacity'], + tray_ref=disk['physicalLocation']['trayRef'], + product_id=disk['productID'], + firmware_version=disk['firmwareVersion'], + serial_number=disk['serialNumber'].lstrip() + ) for disk in array_facts['drive']] + + facts['netapp_management_interfaces'] = [ + dict(controller=controller_reference_label[controller['controllerRef']], + name=iface['ethernet']['interfaceName'], + alias=iface['ethernet']['alias'], + channel=iface['ethernet']['channel'], + mac_address=iface['ethernet']['macAddr'], + remote_ssh_access=iface['ethernet']['rloginEnabled'], + link_status=iface['ethernet']['linkStatus'], + ipv4_enabled=iface['ethernet']['ipv4Enabled'], + ipv4_address_config_method=iface['ethernet']['ipv4AddressConfigMethod'].lower().replace("config", ""), + ipv4_address=iface['ethernet']['ipv4Address'], + ipv4_subnet_mask=iface['ethernet']['ipv4SubnetMask'], + ipv4_gateway=iface['ethernet']['ipv4GatewayAddress'], + ipv6_enabled=iface['ethernet']['ipv6Enabled'], + dns_config_method=iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsAcquisitionType'], + dns_servers=(iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers'] + if iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers'] else []), + ntp_config_method=iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpAcquisitionType'], + ntp_servers=(iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers'] + if iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers'] else []) + ) for controller in array_facts['controller'] for iface in controller['netInterfaces']] + + facts['netapp_hostside_interfaces'] = [ + dict( + fc=[dict(controller=controller_reference_label[controller['controllerRef']], + channel=iface['fibre']['channel'], + link_status=iface['fibre']['linkStatus'], + current_interface_speed=strip_interface_speed(iface['fibre']['currentInterfaceSpeed']), + maximum_interface_speed=strip_interface_speed(iface['fibre']['maximumInterfaceSpeed'])) + for controller in array_facts['controller'] + for iface in controller['hostInterfaces'] + if iface['interfaceType'] == 'fc'], + ib=[dict(controller=controller_reference_label[controller['controllerRef']], + channel=iface['ib']['channel'], + link_status=iface['ib']['linkState'], + mtu=iface['ib']['maximumTransmissionUnit'], + current_interface_speed=strip_interface_speed(iface['ib']['currentSpeed']), + maximum_interface_speed=strip_interface_speed(iface['ib']['supportedSpeed'])) + for controller in array_facts['controller'] + for iface in controller['hostInterfaces'] + if iface['interfaceType'] == 'ib'], + iscsi=[dict(controller=controller_reference_label[controller['controllerRef']], + iqn=iface['iscsi']['iqn'], + link_status=iface['iscsi']['interfaceData']['ethernetData']['linkStatus'], + ipv4_enabled=iface['iscsi']['ipv4Enabled'], + ipv4_address=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4Address'], + 
ipv4_subnet_mask=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4SubnetMask'], + ipv4_gateway=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4GatewayAddress'], + ipv6_enabled=iface['iscsi']['ipv6Enabled'], + mtu=iface['iscsi']['interfaceData']['ethernetData']['maximumFramePayloadSize'], + current_interface_speed=strip_interface_speed(iface['iscsi']['interfaceData'] + ['ethernetData']['currentInterfaceSpeed']), + supported_interface_speeds=strip_interface_speed(iface['iscsi']['interfaceData'] + ['ethernetData'] + ['supportedInterfaceSpeeds'])) + for controller in array_facts['controller'] + for iface in controller['hostInterfaces'] + if iface['interfaceType'] == 'iscsi' and iface['iscsi']['interfaceData']['type'] == 'ethernet'], + sas=[dict(controller=controller_reference_label[controller['controllerRef']], + channel=iface['sas']['channel'], + current_interface_speed=strip_interface_speed(iface['sas']['currentInterfaceSpeed']), + maximum_interface_speed=strip_interface_speed(iface['sas']['maximumInterfaceSpeed']), + link_status=iface['sas']['iocPort']['state']) + for controller in array_facts['controller'] + for iface in controller['hostInterfaces'] + if iface['interfaceType'] == 'sas'])] + + facts['netapp_driveside_interfaces'] = [ + dict( + controller=controller_reference_label[controller['controllerRef']], + interface_type=interface['interfaceType'], + interface_speed=strip_interface_speed( + interface[interface['interfaceType']]['maximumInterfaceSpeed'] + if (interface['interfaceType'] == 'sata' or + interface['interfaceType'] == 'sas' or + interface['interfaceType'] == 'fibre') + else ( + interface[interface['interfaceType']]['currentSpeed'] + if interface['interfaceType'] == 'ib' + else ( + interface[interface['interfaceType']]['interfaceData']['maximumInterfaceSpeed'] + if interface['interfaceType'] == 'iscsi' else 'unknown' + ))), + ) + for controller in array_facts['controller'] + for interface in controller['driveInterfaces']] + + facts['netapp_storage_pools'] = [ + dict( + id=storage_pool['id'], + name=storage_pool['name'], + available_capacity=storage_pool['freeSpace'], + total_capacity=storage_pool['totalRaidedSpace'], + used_capacity=storage_pool['usedSpace'] + ) for storage_pool in array_facts['volumeGroup']] + + all_volumes = list(array_facts['volume'] + array_facts['highLevelVolBundle']['thinVolume']) + + facts['netapp_volumes'] = [ + dict( + id=v['id'], + name=v['name'], + parent_storage_pool_id=v['volumeGroupRef'], + capacity=v['capacity'], + is_thin_provisioned=v['thinProvisioned'], + workload=v['metadata'], + + ) for v in all_volumes] + + # Add access volume information to volumes when enabled. + if array_facts['sa']['accessVolume']['enabled']: + facts['netapp_volumes'].append(dict( + id=array_facts['sa']['accessVolume']['id'], + name="access_volume", + parent_storage_pool_id="", + capacity=array_facts['sa']['accessVolume']['capacity'], + is_thin_provisioned=False, + workload="")) + + facts['netapp_snapshot_consistency_groups'] = [] + for group in array_facts["highLevelVolBundle"]["pitConsistencyGroup"]: + reserve_capacity_full_policy = "purge" if group["repFullPolicy"] == "purgepit" else "reject" + group_info = {"id": group["id"], + "name": group["name"], + "reserve_capacity_full_policy": reserve_capacity_full_policy, + "rollback_priority": group["rollbackPriority"], + "base_volumes": [], + "pit_images": [], + "pit_views": {}} + + # Determine all consistency group base volumes. 
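+            # (Hedged sketch of the walk below: each pitGroup carries a consistencyGroupRef and a baseVolume
+            # reference; pit groups matching this consistency group are resolved against the volume list to
+            # collect member volume names along with their reserve-capacity repository volume ids.)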
+ volumes_by_id = {} + for pit_group in array_facts["highLevelVolBundle"]["pitGroup"]: + if pit_group["consistencyGroupRef"] == group["id"]: + for volume in array_facts["volume"]: + if volume["id"] == pit_group["baseVolume"]: + volumes_by_id.update({volume["id"]: volume["name"]}) + group_info["base_volumes"].append({"id": volume["id"], + "name": volume["name"], + "reserve_capacity_volume_id": pit_group["repositoryVolume"]}) + break + + # Determine all consistency group pit snapshot images. + group_pit_key_values = {} + for entry in key_values: + if re.search("ansible\\|%s\\|" % group["name"], entry["key"]): + pit_name = entry["key"].replace("ansible|%s|" % group["name"], "") + pit_values = entry["value"].split("|") + if len(pit_values) == 3: + timestamp, image_id, description = pit_values + group_pit_key_values.update({timestamp: {"name": pit_name, "description": description}}) + + pit_by_id = {} + for pit in array_facts["highLevelVolBundle"]["pit"]: + if pit["consistencyGroupId"] == group["id"]: + + if pit["pitTimestamp"] in group_pit_key_values.keys(): + pit_image = {"name": group_pit_key_values[pit["pitTimestamp"]]["name"], + "description": group_pit_key_values[pit["pitTimestamp"]]["description"], + "timestamp": datetime.fromtimestamp(int(pit["pitTimestamp"])).strftime("%Y-%m-%d %H:%M:%S")} + else: + pit_image = {"name": "", "description": "", + "timestamp": datetime.fromtimestamp(int(pit["pitTimestamp"])).strftime("%Y-%m-%d %H:%M:%S")} + group_info["pit_images"].append(pit_image) + pit_by_id.update({pit["id"]: pit_image}) + + # Determine all consistency group pit views. + for view in array_facts["highLevelVolBundle"]["pitView"]: + if view["consistencyGroupId"] == group["id"]: + view_timestamp = datetime.fromtimestamp(int(view["viewTime"])).strftime("%Y-%m-%d %H:%M:%S") + reserve_capacity_pct = int(round(float(view["repositoryCapacity"]) / float(view["baseVolumeCapacity"]) * 100)) + if view_timestamp in group_info["pit_views"].keys(): + group_info["pit_views"][view_timestamp]["volumes"].append({"name": view["name"], + "base_volume": volumes_by_id[view["baseVol"]], + "writable": view["accessMode"] == "readWrite", + "reserve_capacity_pct": reserve_capacity_pct, + "status": view["status"]}) + else: + group_info["pit_views"].update({view_timestamp: {"name": pit_by_id[view["basePIT"]]["name"], + "description": pit_by_id[view["basePIT"]]["description"], + "volumes": [{"name": view["name"], + "base_volume": volumes_by_id[view["baseVol"]], + "writable": view["accessMode"] == "readWrite", + "reserve_capacity_pct": reserve_capacity_pct, + "status": view["status"]}]}}) + + facts['netapp_snapshot_consistency_groups'].append(group_info) + + lun_mappings = dict() + for host in facts['netapp_hosts']: + lun_mappings.update({host["name"]: []}) + for host in facts['netapp_host_groups']: + lun_mappings.update({host["name"]: []}) + + facts['netapp_default_hostgroup_access_volume_lun'] = None + for lun in [a['lun'] for a in array_facts['storagePoolBundle']['lunMapping'] + if a['type'] == 'all' and a['mapRef'] == '0000000000000000000000000000000000000000']: + facts['netapp_default_hostgroup_access_volume_lun'] = lun + + # Get all host mappings + host_mappings = dict() + for host_mapping in [h for h in array_facts['storagePoolBundle']['lunMapping'] if h['type'] == 'host']: + for host_name in [h['name'] for h in facts['netapp_hosts'] if h['id'] == host_mapping['mapRef']]: + for volume in [v['name'] for v in facts['netapp_volumes'] if v['id'] == host_mapping['volumeRef']]: + if host_name in 
host_mappings.keys(): + host_mappings[host_name].append((volume, host_mapping['lun'])) + else: + host_mappings[host_name] = [(volume, host_mapping['lun'])] + + # Get all host group mappings + group_mappings = dict() + for group_mapping in [h for h in array_facts['storagePoolBundle']['lunMapping'] if h['type'] == 'cluster']: + for group_name, group_hosts in [(g['name'], g['hosts']) for g in facts['netapp_host_groups'] if g['id'] == group_mapping['mapRef']]: + for volume in [v['name'] for v in facts['netapp_volumes'] if v['id'] == group_mapping['volumeRef']]: + if group_name in group_mappings.keys(): + group_mappings[group_name].append((volume, group_mapping['lun'])) + else: + group_mappings[group_name] = [(volume, group_mapping['lun'])] + + for host_name in [h for h in group_hosts if h in host_mappings.keys()]: + if host_name in host_mappings.keys(): + host_mappings[host_name].append((volume, group_mapping['lun'])) + else: + host_mappings[host_name] = [(volume, group_mapping['lun'])] + + facts['netapp_luns_by_target'] = lun_mappings + if host_mappings: + facts['netapp_luns_by_target'].update(host_mappings) + if group_mappings: + facts['netapp_luns_by_target'].update(group_mappings) + + # Add all host mappings to respective groups mappings + for host_group in facts['netapp_host_groups']: + group_name = host_group['name'] + for host in host_group['hosts']: + facts['netapp_luns_by_target'][group_name].extend(facts['netapp_luns_by_target'][host]) + + # Remove duplicate entries + for obj in facts['netapp_luns_by_target'].keys(): + tmp = dict(facts['netapp_luns_by_target'][obj]) + facts['netapp_luns_by_target'][obj] = [(k, tmp[k]) for k in tmp.keys()] + + workload_tags = None + try: + rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve workload tags. Array [%s]." 
% self.ssid) + + facts['netapp_workload_tags'] = [ + dict( + id=workload_tag['id'], + name=workload_tag['name'], + attributes=workload_tag['workloadAttributes'] + ) for workload_tag in workload_tags] + + targets = array_facts["storagePoolBundle"]["target"] + + facts['netapp_hostside_io_interfaces'] = [] + if "ioInterface" in array_facts: + for interface in array_facts["ioInterface"]: + + # Select only the host side channels + if interface["channelType"] == "hostside": + interface_type = interface["ioInterfaceTypeData"]["interfaceType"] + if interface_type == "fibre": + interface_type = "fc" + elif interface_type == "nvmeCouplingDriver": + interface_type = "couplingDriverNvme" + + interface_data = interface["ioInterfaceTypeData"][interface_type] + command_protocol_properties = interface["commandProtocolPropertiesList"]["commandProtocolProperties"] + + # Build generic information for each interface entry + interface_info = {"protocol": "unknown", + "interface_reference": interface_data["interfaceRef"], + "controller_reference": interface["controllerRef"], + "channel_port_reference": interface_data["channelPortRef"] if "channelPortRef" in interface_data else "", + "controller": controller_reference_label[interface["controllerRef"]], + "channel": interface_data["channel"], + "part": "unknown", + "link_status": "unknown", + "speed": {"current": "unknown", "maximum": "unknown", "supported": []}, + "mtu": None, + "guid": None, + "lid": None, + "nqn": None, + "iqn": None, + "wwnn": None, + "wwpn": None, + "ipv4": None, # enabled, config_method, address, subnet, gateway + "ipv6": None} # for expansion if needed + + # Determine storage target identifiers + controller_iqn = "unknown" + controller_nqn = "unknown" + for target in targets: + if target["nodeName"]["ioInterfaceType"] == "nvmeof": + controller_nqn = target["nodeName"]["nvmeNodeName"] + if target["nodeName"]["ioInterfaceType"] == "iscsi": + controller_iqn = target["nodeName"]["iscsiNodeName"] + + # iSCSI IO interface + if interface_type == "iscsi": + interface_info.update({"ipv4": {"enabled": interface_data["ipv4Enabled"], + "config_method": interface_data["ipv4Data"]["ipv4AddressConfigMethod"], + "address": interface_data["ipv4Data"]["ipv4AddressData"]["ipv4Address"], + "subnet": interface_data["ipv4Data"]["ipv4AddressData"]["ipv4SubnetMask"], + "gateway": interface_data["ipv4Data"]["ipv4AddressData"]["ipv4GatewayAddress"]}}) + + # InfiniBand (iSER) protocol + if interface_data["interfaceData"]["type"] == "infiniband" and interface_data["interfaceData"]["infinibandData"]["isIser"]: + interface_info.update({"protocol": "ib_iser", + "iqn": controller_iqn}) + + # Get more details from hardware-inventory + for ib_port in hardware_inventory_facts["ibPorts"]: + if ib_port["channelPortRef"] == interface_info["channel_port_reference"]: + interface_info.update({"link_status": ib_port["linkState"], + "guid": ib_port["globalIdentifier"], + "lid": ib_port["localIdentifier"], + "speed": {"current": strip_interface_speed(ib_port["currentSpeed"]), + "maximum": strip_interface_speed(ib_port["supportedSpeed"])[-1], + "supported": strip_interface_speed(ib_port["supportedSpeed"])}}) + + # iSCSI protocol + elif interface_data["interfaceData"]["type"] == "ethernet": + ethernet_data = interface_data["interfaceData"]["ethernetData"] + interface_info.update({"protocol": "iscsi", + "iqn": controller_iqn}) + interface_info.update({"part": "%s,%s" % (ethernet_data["partData"]["vendorName"], ethernet_data["partData"]["partNumber"]), + "link_status": 
ethernet_data["linkStatus"], + "mtu": ethernet_data["maximumFramePayloadSize"], + "speed": {"current": strip_interface_speed(ethernet_data["currentInterfaceSpeed"]), + "maximum": strip_interface_speed(ethernet_data["maximumInterfaceSpeed"]), + "supported": strip_interface_speed(ethernet_data["supportedInterfaceSpeeds"])}}) + + # Fibre Channel IO interface + elif interface_type == "fc": + interface_info.update({"wwnn": interface_data["nodeName"], + "wwpn": interface_data["addressId"], + "part": interface_data["part"], + "link_status": interface_data["linkStatus"], + "speed": {"current": strip_interface_speed(interface_data["currentInterfaceSpeed"]), + "maximum": strip_interface_speed(interface_data["maximumInterfaceSpeed"]), + "supported": "unknown"}}) + + # NVMe over fibre channel protocol + if (command_protocol_properties and command_protocol_properties[0]["commandProtocol"] == "nvme" and + command_protocol_properties[0]["nvmeProperties"]["commandSet"] == "nvmeof" and + command_protocol_properties[0]["nvmeProperties"]["nvmeofProperties"]["fcProperties"]): + interface_info.update({"protocol": "nvme_fc", + "nqn": controller_nqn}) + + # Fibre channel protocol + else: + interface_info.update({"protocol": "fc"}) + + # SAS IO interface + elif interface_type == "sas": + interface_info.update({"protocol": "sas", + "wwpn": interface_data["addressId"], + "part": interface_data["part"], + "speed": {"current": strip_interface_speed(interface_data["currentInterfaceSpeed"]), + "maximum": strip_interface_speed(interface_data["maximumInterfaceSpeed"]), + "supported": "unknown"}}) + + # Infiniband IO interface + elif interface_type == "ib": + interface_info.update({"link_status": interface_data["linkState"], + "speed": {"current": strip_interface_speed(interface_data["currentSpeed"]), + "maximum": strip_interface_speed(interface_data["supportedSpeed"])[-1], + "supported": strip_interface_speed(interface_data["supportedSpeed"])}, + "mtu": interface_data["maximumTransmissionUnit"], + "guid": interface_data["globalIdentifier"], + "lid": interface_data["localIdentifier"]}) + + # Determine protocol (NVMe over Infiniband, InfiniBand iSER, InfiniBand SRP) + if interface_data["isNVMeSupported"]: + interface_info.update({"protocol": "nvme_ib", + "nqn": controller_nqn}) + elif interface_data["isISERSupported"]: + interface_info.update({"protocol": "ib_iser", + "iqn": controller_iqn}) + elif interface_data["isSRPSupported"]: + interface_info.update({"protocol": "ib_srp"}) + + # Determine command protocol information + if command_protocol_properties: + for command_protocol_property in command_protocol_properties: + if command_protocol_property["commandProtocol"] == "nvme": + if command_protocol_property["nvmeProperties"]["commandSet"] == "nvmeof": + ip_address_data = command_protocol_property["nvmeProperties"]["nvmeofProperties"]["ibProperties"]["ipAddressData"] + if ip_address_data["addressType"] == "ipv4": + interface_info.update({"ipv4": {"enabled": True, + "config_method": "configStatic", + "address": ip_address_data["ipv4Data"]["ipv4Address"], + "subnet": ip_address_data["ipv4Data"]["ipv4SubnetMask"], + "gateway": ip_address_data["ipv4Data"]["ipv4GatewayAddress"]}}) + + elif command_protocol_property["commandProtocol"] == "scsi": + if command_protocol_property["scsiProperties"]["scsiProtocolType"] == "iser": + ipv4_data = command_protocol_property["scsiProperties"]["iserProperties"]["ipv4Data"] + interface_info.update({"ipv4": {"enabled": True, + "config_method": ipv4_data["ipv4AddressConfigMethod"], + 
"address": ipv4_data["ipv4AddressData"]["ipv4Address"], + "subnet": ipv4_data["ipv4AddressData"]["ipv4SubnetMask"], + "gateway": ipv4_data["ipv4AddressData"]["ipv4GatewayAddress"]}}) + + # Ethernet IO interface + elif interface_type == "ethernet": + ethernet_data = interface_data["interfaceData"]["ethernetData"] + interface_info.update({"part": "%s,%s" % (ethernet_data["partData"]["vendorName"], ethernet_data["partData"]["partNumber"]), + "link_status": ethernet_data["linkStatus"], + "mtu": ethernet_data["maximumFramePayloadSize"], + "speed": {"current": strip_interface_speed(ethernet_data["currentInterfaceSpeed"]), + "maximum": strip_interface_speed(ethernet_data["maximumInterfaceSpeed"]), + "supported": strip_interface_speed(ethernet_data["supportedInterfaceSpeeds"])}}) + + # Determine command protocol information + if command_protocol_properties: + for command_protocol_property in command_protocol_properties: + if command_protocol_property["commandProtocol"] == "nvme": + if command_protocol_property["nvmeProperties"]["commandSet"] == "nvmeof": + + nvmeof_properties = command_protocol_property["nvmeProperties"]["nvmeofProperties"] + if nvmeof_properties["provider"] == "providerRocev2": + ipv4_data = nvmeof_properties["roceV2Properties"]["ipv4Data"] + interface_info.update({"protocol": "nvme_roce", + "nqn": controller_nqn}) + interface_info.update({"ipv4": {"enabled": nvmeof_properties["roceV2Properties"]["ipv4Enabled"], + "config_method": ipv4_data["ipv4AddressConfigMethod"], + "address": ipv4_data["ipv4AddressData"]["ipv4Address"], + "subnet": ipv4_data["ipv4AddressData"]["ipv4SubnetMask"], + "gateway": ipv4_data["ipv4AddressData"]["ipv4GatewayAddress"]}}) + + + facts['netapp_hostside_io_interfaces'].append(interface_info) + + # Gather information from controller->hostInterfaces if available (This is a deprecated data structure. Prefer information from ioInterface. + for controller in array_facts['controller']: + if "hostInterfaces" in controller.keys(): + for interface in controller['hostInterfaces']: + + # Ignore any issue with this data structure since its a deprecated data structure. 
+ try: + interface_type = interface["interfaceType"] + interface_data = interface["fibre" if interface_type == "fc" else interface_type] + + # Build generic information for each interface entry + interface_info = {"protocol": "unknown", + "interface_reference": interface_data["interfaceRef"], + "controller_reference": controller["controllerRef"], + "channel_port_reference": interface_data["channelPortRef"] if "channelPortRef" in interface_data else "", + "controller": controller_reference_label[controller["controllerRef"]], + "channel": interface_data["channel"], + "part": "unknown", + "link_status": "unknown", + "speed": {"current": "unknown", "maximum": "unknown", "supported": []}, + "mtu": None, + "guid": None, + "lid": None, + "nqn": None, + "iqn": None, + "wwnn": None, + "wwpn": None, + "ipv4": None, # enabled, config_method, address, subnet, gateway + "ipv6": None} # for expansion if needed + + # Add target information + for target in targets: + if target["nodeName"]["ioInterfaceType"] == "nvmeof": + interface_info.update({"nqn": target["nodeName"]["nvmeNodeName"]}) + if target["nodeName"]["ioInterfaceType"] == "iscsi": + interface_info.update({"iqn": target["nodeName"]["iscsiNodeName"]}) + + # iSCSI IO interface + if interface_type == "iscsi": + interface_info.update({"ipv4": {"enabled": interface_data["ipv4Enabled"], + "config_method": interface_data["ipv4Data"]["ipv4AddressConfigMethod"], + "address": interface_data["ipv4Data"]["ipv4AddressData"]["ipv4Address"], + "subnet": interface_data["ipv4Data"]["ipv4AddressData"]["ipv4SubnetMask"], + "gateway": interface_data["ipv4Data"]["ipv4AddressData"]["ipv4GatewayAddress"]}}) + # InfiniBand (iSER) protocol + if interface_data["interfaceData"]["type"] == "infiniband" and interface_data["interfaceData"]["infinibandData"]["isIser"]: + interface_info.update({"protocol": "ib_iser"}) + + # Get more details from hardware-inventory + for ib_port in hardware_inventory_facts["ibPorts"]: + if ib_port["channelPortRef"] == interface_info["channel_port_reference"]: + interface_info.update({"link_status": ib_port["linkState"], + "guid": ib_port["globalIdentifier"], + "lid": ib_port["localIdentifier"], + "speed": {"current": strip_interface_speed(ib_port["currentSpeed"]), + "maximum": strip_interface_speed(ib_port["supportedSpeed"])[-1], + "supported": strip_interface_speed(ib_port["supportedSpeed"])}}) + # iSCSI protocol + elif interface_data["interfaceData"]["type"] == "ethernet": + ethernet_data = interface_data["interfaceData"]["ethernetData"] + interface_info.update({"protocol": "iscsi"}) + interface_info.update({"part": "%s,%s" % (ethernet_data["partData"]["vendorName"], ethernet_data["partData"]["partNumber"]), + "link_status": ethernet_data["linkStatus"], + "mtu": ethernet_data["maximumFramePayloadSize"], + "speed": {"current": strip_interface_speed(ethernet_data["currentInterfaceSpeed"]), + "maximum": strip_interface_speed(ethernet_data["maximumInterfaceSpeed"]), + "supported": strip_interface_speed(ethernet_data["supportedInterfaceSpeeds"])}}) + # Fibre Channel IO interface + elif interface_type == "fc": + interface_info.update({"protocol": "fc", + "wwnn": interface_data["nodeName"], + "wwpn": interface_data["addressId"], + "link_status": interface_data["linkStatus"], + "speed": {"current": strip_interface_speed(interface_data["currentInterfaceSpeed"]), + "maximum": strip_interface_speed(interface_data["maximumInterfaceSpeed"]), + "supported": "unknown"}}) + # SAS IO interface + elif interface_type == "sas": + 
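+                        # Note: unlike the ioInterface-based SAS entry above, which reports the SAS addressId
+                        # as the wwpn, this deprecated structure exposes the port identifier through
+                        # iocPort/portTypeData.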
interface_info.update({"protocol": "sas", + "wwpn": interface_data["iocPort"]["portTypeData"]["portIdentifier"], + "part": interface_data["part"], + "speed": {"current": strip_interface_speed(interface_data["currentInterfaceSpeed"]), + "maximum": strip_interface_speed(interface_data["maximumInterfaceSpeed"]), + "supported": "unknown"}}) + # Infiniband IO interface + elif interface_type == "ib": + interface_info.update({"link_status": interface_data["linkState"], + "speed": {"current": strip_interface_speed(interface_data["currentSpeed"]), + "maximum": strip_interface_speed(interface_data["supportedSpeed"])[-1], + "supported": strip_interface_speed(interface_data["supportedSpeed"])}, + "mtu": interface_data["maximumTransmissionUnit"], + "guid": interface_data["globalIdentifier"], + "lid": interface_data["localIdentifier"]}) + + # Determine protocol (NVMe over Infiniband, InfiniBand iSER, InfiniBand SRP) + if interface_data["isNVMeSupported"]: + interface_info.update({"protocol": "nvme_ib"}) + elif interface_data["isISERSupported"]: + interface_info.update({"protocol": "ib_iser"}) + elif interface_data["isSRPSupported"]: + interface_info.update({"protocol": "ib_srp"}) + + # Ethernet IO interface + elif interface_type == "ethernet": + ethernet_data = interface_data["interfaceData"]["ethernetData"] + interface_info.update({"part": "%s,%s" % (ethernet_data["partData"]["vendorName"], ethernet_data["partData"]["partNumber"]), + "link_status": ethernet_data["linkStatus"], + "mtu": ethernet_data["maximumFramePayloadSize"], + "speed": {"current": strip_interface_speed(ethernet_data["currentInterfaceSpeed"]), + "maximum": strip_interface_speed(ethernet_data["maximumInterfaceSpeed"]), + "supported": strip_interface_speed(ethernet_data["supportedInterfaceSpeeds"])}}) + + # Only add interface if not already added (i.e. 
was part of ioInterface structure) + for existing_hostside_io_interfaces in facts['netapp_hostside_io_interfaces']: + if existing_hostside_io_interfaces["interface_reference"] == interface_info["interface_reference"]: + break + else: + facts['netapp_hostside_io_interfaces'].append(interface_info) + except Exception as error: + pass + + # Create a dictionary of volume lists keyed by host names + facts['netapp_volumes_by_initiators'] = dict() + for mapping in array_facts['storagePoolBundle']['lunMapping']: + for host in facts['netapp_hosts']: + if mapping['mapRef'] == host['hosts_reference'] or mapping['mapRef'] == host['group_id']: + if host['name'] not in facts['netapp_volumes_by_initiators'].keys(): + facts['netapp_volumes_by_initiators'].update({host['name']: []}) + + # Determine host io interface protocols + host_types = [port['type'] for port in host['ports']] + hostside_io_interface_protocols = [] + host_port_protocols = [] + host_port_information = {} + for interface in facts['netapp_hostside_io_interfaces']: + hostside_io_interface_protocols.append(interface["protocol"]) + for host_type in host_types: + if host_type == "iscsi" and interface["protocol"] in ["iscsi", "ib_iser"]: + host_port_protocols.append(interface["protocol"]) + if interface["protocol"] in host_port_information: + # Skip duplicate entries into host_port_information + for host_port_info in host_port_information[interface["protocol"]]: + if interface["interface_reference"] == host_port_info["interface_reference"]: + break + else: + host_port_information[interface["protocol"]].append(interface) + else: + host_port_information.update({interface["protocol"]: [interface]}) + + elif host_type == "fc" and interface["protocol"] in ["fc"]: + host_port_protocols.append(interface["protocol"]) + if interface["protocol"] in host_port_information: + # Skip duplicate entries into host_port_information + for host_port_info in host_port_information[interface["protocol"]]: + if interface["interface_reference"] == host_port_info["interface_reference"]: + break + else: + host_port_information[interface["protocol"]].append(interface) + else: + host_port_information.update({interface["protocol"]: [interface]}) + + elif host_type == "sas" and interface["protocol"] in ["sas"]: + host_port_protocols.append(interface["protocol"]) + if interface["protocol"] in host_port_information: + # Skip duplicate entries into host_port_information + for host_port_info in host_port_information[interface["protocol"]]: + if interface["interface_reference"] == host_port_info["interface_reference"]: + break + else: + host_port_information[interface["protocol"]].append(interface) + else: + host_port_information.update({interface["protocol"]: [interface]}) + + elif host_type == "ib" and interface["protocol"] in ["ib_iser", "ib_srp"]: + host_port_protocols.append(interface["protocol"]) + if interface["protocol"] in host_port_information: + # Skip duplicate entries into host_port_information + for host_port_info in host_port_information[interface["protocol"]]: + if interface["interface_reference"] == host_port_info["interface_reference"]: + break + else: + host_port_information[interface["protocol"]].append(interface) + else: + host_port_information.update({interface["protocol"]: [interface]}) + + elif host_type == "nvmeof" and interface["protocol"] in ["nvme_ib", "nvme_fc", "nvme_roce"]: + host_port_protocols.append(interface["protocol"]) + if interface["protocol"] in host_port_information: + # Skip duplicate entries into host_port_information + for 
host_port_info in host_port_information[interface["protocol"]]: + if interface["interface_reference"] == host_port_info["interface_reference"]: + break + else: + host_port_information[interface["protocol"]].append(interface) + else: + host_port_information.update({interface["protocol"]: [interface]}) + + for volume in all_volumes: + storage_pool = [pool["name"] for pool in facts['netapp_storage_pools'] if pool["id"] == volume["volumeGroupRef"]][0] + + if mapping['id'] in [volume_mapping['id'] for volume_mapping in volume['listOfMappings']]: + + # Determine workload name if there is one + workload_name = "" + metadata = dict() + for volume_tag in volume['metadata']: + if volume_tag['key'] == 'workloadId': + for workload_tag in facts['netapp_workload_tags']: + if volume_tag['value'] == workload_tag['id']: + workload_name = workload_tag['name'] + metadata = dict((entry['key'], entry['value']) + for entry in workload_tag['attributes'] + if entry['key'] != 'profileId') + + # Get volume specific metadata tags + volume_metadata_raw = dict() + volume_metadata = dict() + for entry in volume['metadata']: + volume_metadata_raw.update({entry["key"]: entry["value"]}) + + for sorted_key in sorted(volume_metadata_raw.keys()): + if re.match(".*~[0-9]$", sorted_key): + key = re.sub("~[0-9]$", "", sorted_key) + if key in volume_metadata: + volume_metadata[key] = volume_metadata[key] + volume_metadata_raw[sorted_key] + else: + volume_metadata.update({key: volume_metadata_raw[sorted_key]}) + else: + volume_metadata.update({sorted_key: volume_metadata_raw[sorted_key]}) + + # Determine drive count + stripe_count = 0 + vg_drive_num = sum(1 for d in array_facts['drive'] if d['currentVolumeGroupRef'] == volume['volumeGroupRef'] and not d['hotSpare']) + + if volume['raidLevel'] == "raidDiskPool": + stripe_count = 8 + if volume['raidLevel'] == "raid0": + stripe_count = vg_drive_num + if volume['raidLevel'] == "raid1": + stripe_count = int(vg_drive_num / 2) + if volume['raidLevel'] in ["raid3", "raid5"]: + stripe_count = vg_drive_num - 1 + if volume['raidLevel'] == "raid6": + stripe_count = vg_drive_num - 2 + + volume_info = {"type": volume['objectType'], + "name": volume['name'], + "storage_pool": storage_pool, + "host_types": set(host_types), + "host_port_information": host_port_information, + "host_port_protocols": set(host_port_protocols), + "hostside_io_interface_protocols": set(hostside_io_interface_protocols), + "id": volume['id'], + "wwn": volume['wwn'], + "eui": volume['extendedUniqueIdentifier'], + "workload_name": workload_name, + "workload_metadata": metadata, + "meta_data": metadata, + "volume_metadata": volume_metadata, + "raid_level": volume['raidLevel'], + "segment_size_kb": int(volume['segmentSize'] / 1024), + "stripe_count": stripe_count} + facts['netapp_volumes_by_initiators'][host['name']].append(volume_info) + + # Use the base volume to populate related details for snapshot volumes. 
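+                            # Snapshot (pitView) volumes reuse the base volume's pool, workload, and RAID
+                            # details assembled above; only the identity fields (type, name, id, wwn, eui)
+                            # are overridden from the pitView entry itself.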
+                            for pit_view_volume in array_facts["highLevelVolBundle"]["pitView"]:
+                                if volume["id"] == pit_view_volume["baseVol"]:
+                                    pit_view_volume_info = volume_info.copy()
+                                    pit_view_volume_info.update({"type": pit_view_volume["objectType"],
+                                                                 "name": pit_view_volume['name'],
+                                                                 "id": pit_view_volume['id'],
+                                                                 "wwn": pit_view_volume['wwn'],
+                                                                 "eui": pit_view_volume['extendedUniqueIdentifier']})
+                                    facts['netapp_volumes_by_initiators'][host['name']].append(pit_view_volume_info)
+
+        features = [feature for feature in array_facts['sa']['capabilities']]
+        features.extend([feature['capability'] for feature in array_facts['sa']['premiumFeatures']
+                         if feature['isEnabled']])
+        features = list(set(features))  # ensure unique
+        features.sort()
+        facts['netapp_enabled_features'] = features
+
+        return facts
+
+    def get_facts(self):
+        """Get the embedded or web services proxy information."""
+        facts = self.get_array_facts()
+
+        facts_from_proxy = not self.is_embedded()
+        facts.update({"facts_from_proxy": facts_from_proxy})
+
+        self.module.exit_json(msg="Gathered facts for storage array. Array ID: [%s]." % self.ssid,
+                              storage_array_facts=facts)
+
+
+def strip_interface_speed(speed):
+    """Converts symbol interface speeds to a more common notation. Example: 'speed10gig' -> '10g'"""
+    if isinstance(speed, list):
+        result = [re.match(r"speed[0-9]{1,3}[gm]", sp) for sp in speed]
+        # Entries that do not match the speed pattern are dropped from the list.
+        result = [sp.group().replace("speed", "") for sp in result if sp]
+        result = ["auto" if re.match(r"auto", sp) else sp for sp in result]
+    else:
+        result = re.match(r"speed[0-9]{1,3}[gm]", speed)
+        result = result.group().replace("speed", "") if result else "unknown"
+        result = "auto" if re.match(r"auto", result.lower()) else result
+    return result
+
+
+def main():
+    facts = Facts()
+    facts.get_facts()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_firmware.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_firmware.py
new file mode 100644
index 000000000..fb7922362
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_firmware.py
@@ -0,0 +1,604 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_firmware
+short_description: NetApp E-Series manage firmware
+description:
+    - Ensure specific firmware versions are activated on an E-Series storage system.
+author:
+    - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.santricity_doc
+options:
+    nvsram:
+        description:
+            - Path to the NVSRAM file.
+            - NetApp recommends upgrading the NVSRAM when upgrading firmware.
+            - Due to concurrency issues, use M(na_santricity_proxy_firmware_upload) to upload firmware and nvsram to SANtricity Web Services Proxy when
+              upgrading multiple systems at the same time on the same instance of the proxy.
+        type: str
+        required: false
+    firmware:
+        description:
+            - Path to the firmware file.
+            - Due to concurrency issues, use M(na_santricity_proxy_firmware_upload) to upload firmware and nvsram to SANtricity Web Services Proxy when
+              upgrading multiple systems at the same time on the same instance of the proxy.
+ type: str + required: True + wait_for_completion: + description: + - This flag will cause module to wait for any upgrade actions to complete. + - When changes are required to both firmware and nvsram and task is executed against SANtricity Web Services Proxy, + the firmware will have to complete before nvsram can be installed. + type: bool + default: false + clear_mel_events: + description: + - This flag will force firmware to be activated in spite of the storage system mel-event issues. + - Warning! This will clear all storage system mel-events. Use at your own risk! + type: bool + default: false +""" +EXAMPLES = """ +- name: Ensure correct firmware versions + na_santricity_firmware: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + nvsram: "path/to/nvsram" + firmware: "path/to/bundle" + wait_for_completion: true + clear_mel_events: true +- name: Ensure correct firmware versions + na_santricity_firmware: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + nvsram: "path/to/nvsram" + firmware: "path/to/firmware" +""" +RETURN = """ +msg: + description: Status and version of firmware and NVSRAM. + type: str + returned: always + sample: +""" +import os +import multiprocessing +import threading + +from time import sleep +from ansible.module_utils import six +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata, request +from ansible.module_utils._text import to_native + + +class NetAppESeriesFirmware(NetAppESeriesModule): + COMPATIBILITY_CHECK_TIMEOUT_SEC = 60 + REBOOT_TIMEOUT_SEC = 30 * 60 + MINIMUM_PROXY_VERSION = "04.10.00.0000" + + def __init__(self): + ansible_options = dict( + nvsram=dict(type="str", required=False), + firmware=dict(type="str", required=True), + wait_for_completion=dict(type="bool", default=False), + clear_mel_events=dict(type="bool", default=False)) + + super(NetAppESeriesFirmware, self).__init__(ansible_options=ansible_options, + web_services_version="02.00.0000.0000", + supports_check_mode=True) + + args = self.module.params + self.nvsram = args["nvsram"] + self.firmware = args["firmware"] + self.wait_for_completion = args["wait_for_completion"] + self.clear_mel_events = args["clear_mel_events"] + + self.nvsram_name = None + self.firmware_name = None + self.is_bundle_cache = None + self.firmware_version_cache = None + self.nvsram_version_cache = None + self.upgrade_required = False + self.upgrade_in_progress = False + self.module_info = dict() + + if self.nvsram: + self.nvsram_name = os.path.basename(self.nvsram) + if self.firmware: + self.firmware_name = os.path.basename(self.firmware) + + self.last_known_event = -1 + self.is_firmware_activation_started_mel_event_count = 1 + self.is_nvsram_download_completed_mel_event_count = 1 + self.proxy_wait_for_upgrade_mel_event_count = 1 + + def is_upgrade_in_progress(self): + """Determine whether an upgrade is already in progress.""" + in_progress = False + + if self.is_proxy(): + try: + rc, status = self.request("storage-systems/%s/cfw-upgrade" % self.ssid) + in_progress = status["running"] + except Exception as error: + if "errorMessage" in to_native(error): + self.module.warn("Failed to retrieve upgrade status. Array [%s]. Error [%s]." % (self.ssid, error)) + in_progress = False + else: + self.module.fail_json(msg="Failed to retrieve upgrade status. Array [%s]. 
Error [%s]." % (self.ssid, error)) + else: + in_progress = False + + return in_progress + + def is_firmware_bundled(self): + """Determine whether supplied firmware is bundle.""" + if self.is_bundle_cache is None: + with open(self.firmware, "rb") as fh: + signature = fh.read(16).lower() + + if b"firmware" in signature: + self.is_bundle_cache = False + elif b"combined_content" in signature: + self.is_bundle_cache = True + else: + self.module.fail_json(msg="Firmware file is invalid. File [%s]. Array [%s]" % (self.firmware, self.ssid)) + + return self.is_bundle_cache + + def firmware_version(self): + """Retrieve firmware version of the firmware file. Return: bytes string""" + if self.firmware_version_cache is None: + + # Search firmware file for bundle or firmware version + with open(self.firmware, "rb") as fh: + line = fh.readline() + while line: + if self.is_firmware_bundled(): + if b'displayableAttributeList=' in line: + for item in line[25:].split(b','): + key, value = item.split(b"|") + if key == b'VERSION': + self.firmware_version_cache = value.strip(b"\n") + break + elif b"Version:" in line: + self.firmware_version_cache = line.split()[-1].strip(b"\n") + break + line = fh.readline() + else: + self.module.fail_json(msg="Failed to determine firmware version. File [%s]. Array [%s]." % (self.firmware, self.ssid)) + return self.firmware_version_cache + + def nvsram_version(self): + """Retrieve NVSRAM version of the NVSRAM file. Return: byte string""" + if self.nvsram_version_cache is None: + + with open(self.nvsram, "rb") as fh: + line = fh.readline() + while line: + if b".NVSRAM Configuration Number" in line: + self.nvsram_version_cache = line.split(b'"')[-2] + break + line = fh.readline() + else: + self.module.fail_json(msg="Failed to determine NVSRAM file version. File [%s]. Array [%s]." % (self.nvsram, self.ssid)) + return self.nvsram_version_cache + + def check_system_health(self): + """Ensure E-Series storage system is healthy. Works for both embedded and proxy web services.""" + try: + rc, response = self.request("storage-systems/%s/health-check" % self.ssid, method="POST") + return response["successful"] + except Exception as error: + self.module.fail_json(msg="Health check failed! Array Id [%s]. Error[%s]." % (self.ssid, to_native(error))) + + def embedded_check_compatibility(self): + """Verify files are compatible with E-Series storage system.""" + if self.nvsram: + self.embedded_check_nvsram_compatibility() + if self.firmware: + self.embedded_check_bundle_compatibility() + + def embedded_check_nvsram_compatibility(self): + """Verify the provided NVSRAM is compatible with E-Series storage system.""" + files = [("nvsramimage", self.nvsram_name, self.nvsram)] + headers, data = create_multipart_formdata(files=files) + compatible = {} + try: + rc, compatible = self.request("firmware/embedded-firmware/%s/nvsram-compatibility-check" % self.ssid, method="POST", data=data, headers=headers) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve NVSRAM compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error))) + + if not compatible["signatureTestingPassed"]: + self.module.fail_json(msg="Invalid NVSRAM file. File [%s]." % self.nvsram) + if not compatible["fileCompatible"]: + self.module.fail_json(msg="Incompatible NVSRAM file. File [%s]." 
% self.nvsram) + + # Determine whether nvsram upgrade is required + for module in compatible["versionContents"]: + if module["bundledVersion"] != module["onboardVersion"]: + self.upgrade_required = True + + # Update bundle info + self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}}) + + def embedded_check_bundle_compatibility(self): + """Verify the provided firmware bundle is compatible with E-Series storage system.""" + files = [("files[]", "blob", self.firmware)] + headers, data = create_multipart_formdata(files=files, send_8kb=True) + compatible = {} + try: + rc, compatible = self.request("firmware/embedded-firmware/%s/bundle-compatibility-check" % self.ssid, method="POST", data=data, headers=headers) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve bundle compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error))) + + # Determine whether valid and compatible firmware + if not compatible["signatureTestingPassed"]: + self.module.fail_json(msg="Invalid firmware bundle file. File [%s]." % self.firmware) + if not compatible["fileCompatible"]: + self.module.fail_json(msg="Incompatible firmware bundle file. File [%s]." % self.firmware) + + # Determine whether bundle upgrade is required + for module in compatible["versionContents"]: + bundle_module_version = module["bundledVersion"].split(".") + onboard_module_version = module["onboardVersion"].split(".") + version_minimum_length = min(len(bundle_module_version), len(onboard_module_version)) + + if bundle_module_version[:version_minimum_length] != onboard_module_version[:version_minimum_length]: + self.upgrade_required = True + + # Build the modules information for logging purposes + self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}}) + + def embedded_firmware_activate(self): + """Activate firmware.""" + rc, response = self.request("firmware/embedded-firmware/activate", method="POST", ignore_errors=True, timeout=10) + if rc == "422": + self.module.fail_json(msg="Failed to activate the staged firmware. Array Id [%s]. Error [%s]" % (self.ssid, response)) + + def embedded_firmware_download(self): + """Execute the firmware download.""" + if self.nvsram: + firmware_url = "firmware/embedded-firmware?nvsram=true&staged=true" + headers, data = create_multipart_formdata(files=[("nvsramfile", self.nvsram_name, self.nvsram), + ("dlpfile", self.firmware_name, self.firmware)]) + else: + firmware_url = "firmware/embedded-firmware?nvsram=false&staged=true" + headers, data = create_multipart_formdata(files=[("dlpfile", self.firmware_name, self.firmware)]) + + # Stage firmware and nvsram + try: + + rc, response = self.request(firmware_url, method="POST", data=data, headers=headers, timeout=(30 * 60)) + except Exception as error: + self.module.fail_json(msg="Failed to stage firmware. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error))) + + # Activate firmware + activate_thread = threading.Thread(target=self.embedded_firmware_activate) + activate_thread.start() + self.wait_for_reboot() + + def wait_for_reboot(self): + """Wait for controller A to fully reboot and web services running""" + reboot_started = False + reboot_completed = False + self.module.log("Controller firmware: Reboot commencing. Array Id [%s]." 
% self.ssid) + while self.wait_for_completion and not (reboot_started and reboot_completed): + try: + rc, response = self.request("storage-systems/%s/symbol/pingController?controller=a&verboseErrorResponse=true" + % self.ssid, method="POST", timeout=10, log_request=False) + + if reboot_started and response == "ok": + self.module.log("Controller firmware: Reboot completed. Array Id [%s]." % self.ssid) + reboot_completed = True + sleep(2) + except Exception as error: + if not reboot_started: + self.module.log("Controller firmware: Reboot started. Array Id [%s]." % self.ssid) + reboot_started = True + continue + + def firmware_event_logger(self): + """Determine if firmware activation has started.""" + # Determine the last known event + try: + rc, events = self.request("storage-systems/%s/events" % self.ssid) + for event in events: + if int(event["eventNumber"]) > int(self.last_known_event): + self.last_known_event = event["eventNumber"] + except Exception as error: + self.module.fail_json(msg="Failed to determine last known event. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error))) + + while True: + try: + rc, events = self.request("storage-systems/%s/events?lastKnown=%s&wait=1" % (self.ssid, self.last_known_event), log_request=False) + for event in events: + if int(event["eventNumber"]) > int(self.last_known_event): + self.last_known_event = event["eventNumber"] + + # Log firmware events + if event["eventType"] == "firmwareDownloadEvent": + self.module.log("%s" % event["status"]) + if event["status"] == "informational" and event["statusMessage"]: + self.module.log("Controller firmware: %s Array Id [%s]." % (event["statusMessage"], self.ssid)) + + # When activation is successful, finish thread + if event["status"] == "activate_success": + self.module.log("Controller firmware activated. Array Id [%s]." % self.ssid) + return + except Exception as error: + pass + + def wait_for_web_services(self): + """Wait for web services to report firmware and nvsram upgrade.""" + # Wait for system to reflect changes + for count in range(int(self.REBOOT_TIMEOUT_SEC / 5)): + try: + if self.is_firmware_bundled(): + firmware_rc, firmware_version = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/" + "codeVersions[codeModule='bundleDisplay']" % self.ssid, log_request=False) + current_firmware_version = six.b(firmware_version[0]["versionString"]) + else: + firmware_rc, firmware_version = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion" + % self.ssid, log_request=False) + current_firmware_version = six.b(firmware_version[0]) + + nvsram_rc, nvsram_version = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid, log_request=False) + current_nvsram_version = six.b(nvsram_version[0]) + + if current_firmware_version == self.firmware_version() and (not self.nvsram or current_nvsram_version == self.nvsram_version()): + break + except Exception as error: + pass + sleep(5) + else: + self.module.fail_json(msg="Timeout waiting for Santricity Web Services. Array [%s]" % self.ssid) + + # Wait for system to be optimal + for count in range(int(self.REBOOT_TIMEOUT_SEC / 5)): + try: + rc, response = self.request("storage-systems/%s" % self.ssid, log_request=False) + + if response["status"] == "optimal": + self.upgrade_in_progress = False + break + except Exception as error: + pass + sleep(5) + else: + self.module.fail_json(msg="Timeout waiting for storage system to return to optimal status. 
Array [%s]" % self.ssid) + + def embedded_upgrade(self): + """Upload and activate both firmware and NVSRAM.""" + download_thread = threading.Thread(target=self.embedded_firmware_download) + event_thread = threading.Thread(target=self.firmware_event_logger) + download_thread.start() + event_thread.start() + download_thread.join() + event_thread.join() + + def proxy_check_nvsram_compatibility(self, retries=10): + """Verify nvsram is compatible with E-Series storage system.""" + self.module.log("Checking nvsram compatibility...") + data = {"storageDeviceIds": [self.ssid]} + try: + rc, check = self.request("firmware/compatibility-check", method="POST", data=data) + except Exception as error: + if retries: + sleep(1) + self.proxy_check_nvsram_compatibility(retries - 1) + else: + self.module.fail_json(msg="Failed to receive NVSRAM compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + for count in range(int(self.COMPATIBILITY_CHECK_TIMEOUT_SEC / 5)): + try: + rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"]) + except Exception as error: + continue + + if not response["checkRunning"]: + for result in response["results"][0]["nvsramFiles"]: + if result["filename"] == self.nvsram_name: + return + self.module.fail_json(msg="NVSRAM is not compatible. NVSRAM [%s]. Array [%s]." % (self.nvsram_name, self.ssid)) + sleep(5) + + self.module.fail_json(msg="Failed to retrieve NVSRAM status update from proxy. Array [%s]." % self.ssid) + + def proxy_check_firmware_compatibility(self, retries=10): + """Verify firmware is compatible with E-Series storage system.""" + check = {} + try: + rc, check = self.request("firmware/compatibility-check", method="POST", data={"storageDeviceIds": [self.ssid]}) + except Exception as error: + if retries: + sleep(1) + self.proxy_check_firmware_compatibility(retries - 1) + else: + self.module.fail_json(msg="Failed to receive firmware compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + for count in range(int(self.COMPATIBILITY_CHECK_TIMEOUT_SEC / 5)): + try: + rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"]) + except Exception as error: + continue + + if not response["checkRunning"]: + for result in response["results"][0]["cfwFiles"]: + if result["filename"] == self.firmware_name: + return + self.module.fail_json(msg="Firmware bundle is not compatible. firmware [%s]. Array [%s]." % (self.firmware_name, self.ssid)) + sleep(5) + + self.module.fail_json(msg="Failed to retrieve firmware status update from proxy. Array [%s]." % self.ssid) + + def proxy_upload_and_check_compatibility(self): + """Ensure firmware/nvsram file is uploaded and verify compatibility.""" + uploaded_files = [] + try: + rc, uploaded_files = self.request("firmware/cfw-files") + except Exception as error: + self.module.fail_json(msg="Failed to retrieve uploaded firmware and nvsram files. Error [%s]" % to_native(error)) + + if self.firmware: + for uploaded_file in uploaded_files: + if uploaded_file["filename"] == self.firmware_name: + break + else: + fields = [("validate", "true")] + files = [("firmwareFile", self.firmware_name, self.firmware)] + headers, data = create_multipart_formdata(files=files, fields=fields) + try: + rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers) + except Exception as error: + self.module.fail_json(msg="Failed to upload firmware bundle file. File [%s]. Array [%s]. Error [%s]." 
+ % (self.firmware_name, self.ssid, to_native(error))) + self.proxy_check_firmware_compatibility() + + if self.nvsram: + for uploaded_file in uploaded_files: + if uploaded_file["filename"] == self.nvsram_name: + break + else: + fields = [("validate", "true")] + files = [("firmwareFile", self.nvsram_name, self.nvsram)] + headers, data = create_multipart_formdata(files=files, fields=fields) + try: + rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers) + except Exception as error: + self.module.fail_json(msg="Failed to upload NVSRAM file. File [%s]. Array [%s]. Error [%s]." + % (self.nvsram_name, self.ssid, to_native(error))) + self.proxy_check_nvsram_compatibility() + + def proxy_check_upgrade_required(self): + """Determine whether the onboard firmware/nvsram version is the same as the file""" + # Verify controller consistency and get firmware versions + if self.firmware: + current_firmware_version = b"" + try: + # Retrieve current bundle version + if self.is_firmware_bundled(): + rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/codeVersions[codeModule='bundleDisplay']" % self.ssid) + current_firmware_version = six.b(response[0]["versionString"]) + else: + rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion" % self.ssid) + current_firmware_version = six.b(response[0]) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve controller firmware information. Array [%s]. Error [%s]" % (self.ssid, to_native(error))) + + # Determine whether the current firmware version is the same as the file + new_firmware_version = self.firmware_version() + if current_firmware_version != new_firmware_version: + self.upgrade_required = True + + # Build the modules information for logging purposes + self.module_info.update({"bundleDisplay": {"onboard_version": current_firmware_version, "bundled_version": new_firmware_version}}) + + # Determine current NVSRAM version and whether change is required + if self.nvsram: + try: + rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid) + + if six.b(response[0]) != self.nvsram_version(): + self.upgrade_required = True + except Exception as error: + self.module.fail_json(msg="Failed to retrieve storage system's NVSRAM version. Array [%s]. Error [%s]" % (self.ssid, to_native(error))) + + def proxy_wait_for_upgrade(self): + """Wait for SANtricity Web Services Proxy to report upgrade complete""" + self.module.log("(Proxy) Waiting for upgrade to complete...") + + status = {} + while True: + try: + rc, status = self.request("storage-systems/%s/cfw-upgrade" % self.ssid, log_request=False, ignore_errors=True) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve firmware upgrade status! Array [%s]. Error[%s]." % (self.ssid, to_native(error))) + + if "errorMessage" in status: + self.module.warn("Proxy reported an error. Checking whether upgrade completed. Array [%s]. Error [%s]." % (self.ssid, status["errorMessage"])) + self.wait_for_web_services() + break + + if not status["running"]: + if status["activationCompletionTime"]: + self.upgrade_in_progress = False + break + else: + self.module.fail_json(msg="Failed to complete upgrade. Array [%s]." 
% self.ssid)
+            sleep(5)
+
+    def delete_mel_events(self):
+        """Clear all mel-events."""
+        try:
+            rc, response = self.request("storage-systems/%s/mel-events?clearCache=true&resetMel=true" % self.ssid, method="DELETE")
+        except Exception as error:
+            self.module.fail_json(msg="Failed to clear mel-events. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+    def proxy_upgrade(self):
+        """Activate previously uploaded firmware related files."""
+        self.module.log("(Proxy) Firmware upgrade commencing...")
+        body = {"stageFirmware": False, "skipMelCheck": self.clear_mel_events, "cfwFile": self.firmware_name}
+        if self.nvsram:
+            body.update({"nvsramFile": self.nvsram_name})
+
+        try:
+            rc, response = self.request("storage-systems/%s/cfw-upgrade" % self.ssid, method="POST", data=body)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to initiate firmware upgrade. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+        self.upgrade_in_progress = True
+        if self.wait_for_completion:
+            self.proxy_wait_for_upgrade()
+
+    def apply(self):
+        """Upgrade controller firmware."""
+        if self.is_upgrade_in_progress():
+            self.module.fail_json(msg="Upgrade is already in progress. Array [%s]." % self.ssid)
+
+        if self.is_embedded():
+            self.embedded_check_compatibility()
+        else:
+            if not self.is_web_services_version_met(self.MINIMUM_PROXY_VERSION):
+                self.module.fail_json(msg="Minimum proxy version %s required!" % self.MINIMUM_PROXY_VERSION)
+            self.proxy_check_upgrade_required()
+
+            # This will upload the firmware files to the web services proxy but not to the controller
+            if self.upgrade_required:
+                self.proxy_upload_and_check_compatibility()
+
+        # Perform upgrade
+        if self.upgrade_required and not self.module.check_mode:
+
+            if self.clear_mel_events:
+                self.delete_mel_events()
+
+            if self.is_embedded():
+                self.embedded_upgrade()
+            else:
+                self.proxy_upgrade()
+
+        self.module.exit_json(changed=self.upgrade_required, upgrade_in_process=self.upgrade_in_progress, modules_info=self.module_info)
+
+
+def main():
+    firmware = NetAppESeriesFirmware()
+    firmware.apply()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_global.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_global.py
new file mode 100644
index 000000000..030eb3b1f
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_global.py
@@ -0,0 +1,506 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: na_santricity_global
+short_description: NetApp E-Series manage global settings configuration
+description:
+    - Allow the user to configure several of the global settings associated with an E-Series storage-system.
+author:
+    - Michael Price (@lmprice)
+    - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.santricity_doc
+options:
+    name:
+        description:
+            - Set the name of the E-Series storage-system.
+            - This label/name doesn't have to be unique.
+            - May be up to 30 characters in length.
+        type: str
+        aliases:
+            - label
+    cache_block_size:
+        description:
+            - Size of the cache's block size.
+            - All volumes on the storage system share the same cache space; therefore, the volumes can have only one cache block size.
+            - See M(na_santricity_facts) for available sizes.
type: int
+        required: False
+    cache_flush_threshold:
+        description:
+            - This is the percentage threshold of the amount of unwritten data that is allowed to remain on the storage array's cache before flushing.
+        type: int
+        required: False
+    default_host_type:
+        description:
+            - Default host type for the storage system.
+            - Either one of the following names can be specified (Linux DM-MP, VMWare, Windows, Windows Clustered) or a
+              host type index which can be found in M(na_santricity_facts).
+        type: str
+        required: False
+    automatic_load_balancing:
+        description:
+            - Enable automatic load balancing to allow incoming traffic from the hosts to be dynamically managed and balanced across both controllers.
+            - Automatic load balancing requires host connectivity reporting to be enabled.
+        type: str
+        choices:
+            - enabled
+            - disabled
+        required: False
+    host_connectivity_reporting:
+        description:
+            - Enable host connectivity reporting to allow host connections to be monitored for connection and multipath driver problems.
+            - When I(automatic_load_balancing=enabled), I(host_connectivity_reporting) must also be enabled.
+        type: str
+        choices:
+            - enabled
+            - disabled
+        required: False
+    login_banner_message:
+        description:
+            - Text message that appears prior to the login page.
+            - I(login_banner_message="") will delete any existing banner message.
+        type: str
+        required: False
+    controller_shelf_id:
+        description:
+            - This is the identifier for the drive enclosure containing the controllers.
+        type: int
+        required: false
+        default: 0
+notes:
+    - Check mode is supported.
+    - This module requires Web Services API v1.3 or newer.
+"""
+
+EXAMPLES = """
+    - name: Set the storage-system name
+      na_santricity_global:
+        ssid: "1"
+        api_url: "https://192.168.1.100:8443/devmgr/v2"
+        api_username: "admin"
+        api_password: "adminpass"
+        validate_certs: true
+        name: myArrayName
+        cache_block_size: 32768
+        cache_flush_threshold: 80
+        automatic_load_balancing: enabled
+        default_host_type: Linux DM-MP
+    - name: Set the storage-system name (using a host type index)
+      na_santricity_global:
+        ssid: "1"
+        api_url: "https://192.168.1.100:8443/devmgr/v2"
+        api_username: "admin"
+        api_password: "adminpass"
+        validate_certs: true
+        name: myOtherArrayName
+        cache_block_size: 8192
+        cache_flush_threshold: 60
+        automatic_load_balancing: disabled
+        default_host_type: 28
+"""
+
+RETURN = """
+changed:
+    description: Whether global settings were changed
+    returned: on success
+    type: bool
+    sample: true
+array_name:
+    description: Current storage array's name
+    returned: on success
+    type: str
+    sample: arrayName
+automatic_load_balancing:
+    description: Whether automatic load balancing feature has been enabled
+    returned: on success
+    type: str
+    sample: enabled
+host_connectivity_reporting:
+    description: Whether host connectivity reporting feature has been enabled
+    returned: on success
+    type: str
+    sample: enabled
+cache_settings:
+    description: Current cache block size and flushing threshold values
+    returned: on success
+    type: dict
+    sample: {"cache_block_size": 32768, "cache_flush_threshold": 80}
+default_host_type_index:
+    description: Current default host type index
+    returned: on success
+    type: int
+    sample: 28
+login_banner_message:
+    description: Current banner message
+    returned: on success
+    type: str
+    sample: "Banner message here!"
+controller_shelf_id:
+    description: Identifier for the drive enclosure containing the controllers.
+    returned: on success
+    type: int
+    sample: 99
+"""
+import random
+import sys
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata
+from ansible.module_utils import six
+from ansible.module_utils._text import to_native
+try:
+    from ansible.module_utils.ansible_release import __version__ as ansible_version
+except ImportError:
+    ansible_version = 'unknown'
+
+
+class NetAppESeriesGlobalSettings(NetAppESeriesModule):
+    MAXIMUM_LOGIN_BANNER_SIZE_BYTES = 5 * 1024
+    LAST_AVAILABLE_CONTROLLER_SHELF_ID = 99
+
+    def __init__(self):
+        version = "02.00.0000.0000"
+        ansible_options = dict(cache_block_size=dict(type="int", required=False),
+                               cache_flush_threshold=dict(type="int", required=False),
+                               default_host_type=dict(type="str", required=False),
+                               automatic_load_balancing=dict(type="str", choices=["enabled", "disabled"], required=False),
+                               host_connectivity_reporting=dict(type="str", choices=["enabled", "disabled"], required=False),
+                               name=dict(type='str', required=False, aliases=['label']),
+                               login_banner_message=dict(type='str', required=False),
+                               controller_shelf_id=dict(type="int", required=False, default=0))
+
+        super(NetAppESeriesGlobalSettings, self).__init__(ansible_options=ansible_options,
+                                                          web_services_version=version,
+                                                          supports_check_mode=True)
+        args = self.module.params
+        self.name = args["name"]
+        self.cache_block_size = args["cache_block_size"]
+        self.cache_flush_threshold = args["cache_flush_threshold"]
+        self.host_type_index = args["default_host_type"]
+        self.controller_shelf_id = args["controller_shelf_id"]
+
+        self.login_banner_message = None
+        if args["login_banner_message"] is not None:
+            self.login_banner_message = args["login_banner_message"].rstrip("\n")
+
+        self.autoload_enabled = None
+        if args["automatic_load_balancing"]:
+            self.autoload_enabled = args["automatic_load_balancing"] == "enabled"
+
+        self.host_connectivity_reporting_enabled = None
+        if args["host_connectivity_reporting"]:
+            self.host_connectivity_reporting_enabled = args["host_connectivity_reporting"] == "enabled"
+        elif self.autoload_enabled:
+            self.host_connectivity_reporting_enabled = True
+
+        if self.autoload_enabled and not self.host_connectivity_reporting_enabled:
+            self.module.fail_json(msg="Option automatic_load_balancing requires host_connectivity_reporting to be enabled. Array [%s]." % self.ssid)
+
+        self.current_configuration_cache = None
+
+    def get_current_configuration(self, update=False):
+        """Retrieve the current storage array's global configuration."""
+        if self.current_configuration_cache is None or update:
+            self.current_configuration_cache = dict()
+
+            # Get the storage array's capabilities and available options
+            try:
+                rc, capabilities = self.request("storage-systems/%s/capabilities" % self.ssid)
+                self.current_configuration_cache["autoload_capable"] = "capabilityAutoLoadBalancing" in capabilities["productCapabilities"]
+                self.current_configuration_cache["cache_block_size_options"] = capabilities["featureParameters"]["cacheBlockSizes"]
+            except Exception as error:
+                self.module.fail_json(msg="Failed to retrieve storage array capabilities. Array [%s]. Error [%s]."
% (self.ssid, to_native(error))) + + try: + rc, host_types = self.request("storage-systems/%s/host-types" % self.ssid) + self.current_configuration_cache["host_type_options"] = dict() + for host_type in host_types: + self.current_configuration_cache["host_type_options"].update({host_type["code"].lower(): host_type["index"]}) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve storage array host options. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + # Get the current cache settings + try: + rc, settings = self.request("storage-systems/%s/graph/xpath-filter?query=/sa" % self.ssid) + self.current_configuration_cache["cache_settings"] = {"cache_block_size": settings[0]["cache"]["cacheBlkSize"], + "cache_flush_threshold": settings[0]["cache"]["demandFlushThreshold"]} + self.current_configuration_cache["default_host_type_index"] = settings[0]["defaultHostTypeIndex"] + except Exception as error: + self.module.fail_json(msg="Failed to retrieve cache settings. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + try: + rc, array_info = self.request("storage-systems/%s" % self.ssid) + self.current_configuration_cache["autoload_enabled"] = array_info["autoLoadBalancingEnabled"] + self.current_configuration_cache["host_connectivity_reporting_enabled"] = array_info["hostConnectivityReportingEnabled"] + self.current_configuration_cache["name"] = array_info['name'] + except Exception as error: + self.module.fail_json(msg="Failed to determine current configuration. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + try: + rc, login_banner_message = self.request("storage-systems/%s/login-banner?asFile=false" % self.ssid, ignore_errors=True, json_response=False, + headers={"Accept": "application/octet-stream", "netapp-client-type": "Ansible-%s" % ansible_version}) + self.current_configuration_cache["login_banner_message"] = login_banner_message.decode("utf-8").rstrip("\n") + except Exception as error: + self.module.fail_json(msg="Failed to determine current login banner message. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + try: + rc, hardware_inventory = self.request("storage-systems/%s/hardware-inventory" % self.ssid) + self.current_configuration_cache["controller_shelf_reference"] = hardware_inventory["trays"][0]["trayRef"] + self.current_configuration_cache["controller_shelf_id"] = hardware_inventory["trays"][0]["trayId"] + self.current_configuration_cache["used_shelf_ids"] = [tray["trayId"] for tray in hardware_inventory["trays"]] + except Exception as error: + self.module.fail_json(msg="Failed to retrieve controller shelf identifier. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + return self.current_configuration_cache + + def change_cache_block_size_required(self): + """Determine whether cache block size change is required.""" + if self.cache_block_size is None: + return False + + current_configuration = self.get_current_configuration() + current_available_block_sizes = current_configuration["cache_block_size_options"] + if self.cache_block_size not in current_available_block_sizes: + self.module.fail_json(msg="Invalid cache block size. Array [%s]. Available cache block sizes [%s]." 
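+                                      # Validation sketch: cache_block_size must be one of the controller-reported
+                                      # sizes gathered by get_current_configuration(); e.g. 32768 passes when the
+                                      # reported list contains it, while 1000 fails here. Values are illustrative only.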
+                                      % (self.ssid, current_available_block_sizes))
+
+        return self.cache_block_size != current_configuration["cache_settings"]["cache_block_size"]
+
+    def change_cache_flush_threshold_required(self):
+        """Determine whether cache flush percentage change is required."""
+        if self.cache_flush_threshold is None:
+            return False
+
+        current_configuration = self.get_current_configuration()
+        if self.cache_flush_threshold <= 0 or self.cache_flush_threshold >= 100:
+            self.module.fail_json(msg="Invalid cache flushing threshold, it must be a whole number between 0 and 100, exclusive. Array [%s]" % self.ssid)
+
+        return self.cache_flush_threshold != current_configuration["cache_settings"]["cache_flush_threshold"]
+
+    def change_host_type_required(self):
+        """Determine whether default host type change is required."""
+        if self.host_type_index is None:
+            return False
+
+        current_configuration = self.get_current_configuration()
+        current_available_host_types = current_configuration["host_type_options"]
+        if isinstance(self.host_type_index, str):
+            self.host_type_index = self.host_type_index.lower()
+
+        if self.host_type_index in self.HOST_TYPE_INDEXES.keys():
+            self.host_type_index = self.HOST_TYPE_INDEXES[self.host_type_index]
+        elif self.host_type_index in current_available_host_types.keys():
+            self.host_type_index = current_available_host_types[self.host_type_index]
+
+        if self.host_type_index not in current_available_host_types.values():
+            self.module.fail_json(msg="Invalid host type index! Array [%s]. Available host options [%s]." % (self.ssid, current_available_host_types))
+
+        return int(self.host_type_index) != current_configuration["default_host_type_index"]
+
+    def change_autoload_enabled_required(self):
+        """Determine whether automatic load balancing state change is required."""
+        if self.autoload_enabled is None:
+            return False
+
+        change_required = False
+        current_configuration = self.get_current_configuration()
+        if self.autoload_enabled and not current_configuration["autoload_capable"]:
+            self.module.fail_json(msg="Automatic load balancing is not available. Array [%s]." % self.ssid)
+
+        if self.autoload_enabled:
+            if not current_configuration["autoload_enabled"] or not current_configuration["host_connectivity_reporting_enabled"]:
+                change_required = True
+        elif current_configuration["autoload_enabled"]:
+            change_required = True
+
+        return change_required
+
+    def change_host_connectivity_reporting_enabled_required(self):
+        """Determine whether host connectivity reporting state change is required."""
+        if self.host_connectivity_reporting_enabled is None:
+            return False
+
+        current_configuration = self.get_current_configuration()
+        return self.host_connectivity_reporting_enabled != current_configuration["host_connectivity_reporting_enabled"]
+
+    def change_name_required(self):
+        """Determine whether storage array name change is required."""
+        if self.name is None:
+            return False
+
+        current_configuration = self.get_current_configuration()
+        if self.name and len(self.name) > 30:
+            self.module.fail_json(msg="The provided name is invalid, it must be less than or equal to 30 characters in length. Array [%s]" % self.ssid)
+
+        return self.name != current_configuration["name"]
+
+    def change_login_banner_message_required(self):
+        """Determine whether a login banner message change is required."""
+        if self.login_banner_message is None:
+            return False
+
+        current_configuration = self.get_current_configuration()
+        if self.login_banner_message and sys.getsizeof(self.login_banner_message) > self.MAXIMUM_LOGIN_BANNER_SIZE_BYTES:
+            self.module.fail_json(msg="The banner message is too long! It must not exceed %s bytes. Array [%s]" % (self.MAXIMUM_LOGIN_BANNER_SIZE_BYTES, self.ssid))
+        return self.login_banner_message != current_configuration["login_banner_message"]
+
+    def change_controller_shelf_id_required(self):
+        """Determine whether storage array tray identifier change is required."""
+        current_configuration = self.get_current_configuration()
+        if self.controller_shelf_id is not None and self.controller_shelf_id != current_configuration["controller_shelf_id"]:
+
+            if self.controller_shelf_id in current_configuration["used_shelf_ids"]:
+                self.module.fail_json(msg="The controller_shelf_id is currently being used by another shelf. Used Identifiers: [%s]. Array [%s]."
+                                          % (", ".join([str(id) for id in current_configuration["used_shelf_ids"]]), self.ssid))
+
+            if self.controller_shelf_id < 0 or self.controller_shelf_id > self.LAST_AVAILABLE_CONTROLLER_SHELF_ID:
+                self.module.fail_json(msg="The controller_shelf_id must be 0-99 and not already used by another shelf. Used Identifiers: [%s]. Array [%s]."
+                                          % (", ".join([str(id) for id in current_configuration["used_shelf_ids"]]), self.ssid))
+
+            return True
+        return False
+
+    def update_cache_settings(self):
+        """Update cache block size and/or flushing threshold."""
+        current_configuration = self.get_current_configuration()
+        block_size = self.cache_block_size if self.cache_block_size else current_configuration["cache_settings"]["cache_block_size"]
+        threshold = self.cache_flush_threshold if self.cache_flush_threshold else current_configuration["cache_settings"]["cache_flush_threshold"]
+        try:
+            rc, cache_settings = self.request("storage-systems/%s/symbol/setSACacheParams?verboseErrorResponse=true" % self.ssid, method="POST",
+                                              data={"cacheBlkSize": block_size, "demandFlushAmount": threshold, "demandFlushThreshold": threshold})
+        except Exception as error:
+            self.module.fail_json(msg="Failed to set cache settings. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+    def update_host_type(self):
+        """Update default host type."""
+        try:
+            rc, default_host_type = self.request("storage-systems/%s/symbol/setStorageArrayProperties?verboseErrorResponse=true" % self.ssid, method="POST",
+                                                 data={"settings": {"defaultHostTypeIndex": self.host_type_index}})
+        except Exception as error:
+            self.module.fail_json(msg="Failed to set default host type. Array [%s]. Error [%s]" % (self.ssid, to_native(error)))
+
+    def update_autoload(self):
+        """Update automatic load balancing state."""
+        current_configuration = self.get_current_configuration()
+        if self.autoload_enabled and not current_configuration["host_connectivity_reporting_enabled"]:
+            try:
+                rc, host_connectivity_reporting = self.request("storage-systems/%s/symbol/setHostConnectivityReporting?verboseErrorResponse=true" % self.ssid,
+                                                               method="POST", data={"enableHostConnectivityReporting": self.autoload_enabled})
+            except Exception as error:
+                self.module.fail_json(msg="Failed to enable host connectivity reporting, which is required for automatic load balancing."
+                                          " Array [%s]. Error [%s]."
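+                                          # Ordering note (a reading of the code above, not documented API behavior):
+                                          # host connectivity reporting is switched on first because the subsequent
+                                          # setAutoLoadBalancing call is expected to be rejected while reporting is off.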
% (self.ssid, to_native(error))) + + try: + rc, autoload = self.request("storage-systems/%s/symbol/setAutoLoadBalancing?verboseErrorResponse=true" % self.ssid, + method="POST", data={"enableAutoLoadBalancing": self.autoload_enabled}) + except Exception as error: + self.module.fail_json(msg="Failed to set automatic load balancing state. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + def update_host_connectivity_reporting_enabled(self): + """Update automatic load balancing state.""" + try: + rc, host_connectivity_reporting = self.request("storage-systems/%s/symbol/setHostConnectivityReporting?verboseErrorResponse=true" % self.ssid, + method="POST", data={"enableHostConnectivityReporting": self.host_connectivity_reporting_enabled}) + except Exception as error: + self.module.fail_json(msg="Failed to enable host connectivity reporting. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + def update_name(self): + """Update storage array's name.""" + try: + rc, result = self.request("storage-systems/%s/configuration" % self.ssid, method="POST", data={"name": self.name}) + except Exception as err: + self.module.fail_json(msg="Failed to set the storage array name! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + def update_login_banner_message(self): + """Update storage login banner message.""" + if self.login_banner_message: + boundary = "---------------------------" + "".join([str(random.randint(0, 9)) for x in range(27)]) + data_parts = list() + data = None + + if six.PY2: # Generate payload for Python 2 + newline = "\r\n" + data_parts.extend(["--%s" % boundary, + 'Content-Disposition: form-data; name="file"; filename="banner.txt"', + "Content-Type: text/plain", + "", + self.login_banner_message]) + data_parts.extend(["--%s--" % boundary, ""]) + data = newline.join(data_parts) + + else: + newline = six.b("\r\n") + data_parts.extend([six.b("--%s" % boundary), + six.b('Content-Disposition: form-data; name="file"; filename="banner.txt"'), + six.b("Content-Type: text/plain"), + six.b(""), + six.b(self.login_banner_message)]) + data_parts.extend([six.b("--%s--" % boundary), b""]) + data = newline.join(data_parts) + + headers = {"Content-Type": "multipart/form-data; boundary=%s" % boundary, "Content-Length": str(len(data))} + + try: + rc, result = self.request("storage-systems/%s/login-banner" % self.ssid, method="POST", headers=headers, data=data) + except Exception as err: + self.module.fail_json(msg="Failed to set the storage system login banner message! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + else: + try: + rc, result = self.request("storage-systems/%s/login-banner" % self.ssid, method="DELETE") + except Exception as err: + self.module.fail_json(msg="Failed to clear the storage system login banner message! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + def update_controller_shelf_id(self): + """Update controller shelf tray identifier.""" + current_configuration = self.get_current_configuration() + try: + rc, tray = self.request("storage-systems/%s/symbol/updateTray?verboseErrorResponse=true" % self.ssid, method="POST", + data={"ref": current_configuration["controller_shelf_reference"], "trayID": self.controller_shelf_id}) + except Exception as error: + self.module.fail_json(msg="Failed to update controller shelf identifier. Array [%s]. Error [%s]." 
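+                                      # Sketch of the symbol call above (parameter names from the request data; the
+                                      # values are assumed for illustration): updateTray takes the tray reference plus
+                                      # the new identifier, e.g.
+                                      #   {"ref": "0E00000000000000000000000000000000000000", "trayID": 99}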
+                                      % (self.ssid, to_native(error)))
+
+    def update(self):
+        """Ensure the storage array's global settings are correctly set."""
+        change_required = False
+        if (self.change_autoload_enabled_required() or self.change_cache_block_size_required() or self.change_cache_flush_threshold_required() or
+                self.change_host_type_required() or self.change_name_required() or self.change_host_connectivity_reporting_enabled_required() or
+                self.change_login_banner_message_required() or self.change_controller_shelf_id_required()):
+            change_required = True
+
+        if change_required and not self.module.check_mode:
+            if self.change_autoload_enabled_required():
+                self.update_autoload()
+            if self.change_host_connectivity_reporting_enabled_required():
+                self.update_host_connectivity_reporting_enabled()
+            if self.change_cache_block_size_required() or self.change_cache_flush_threshold_required():
+                self.update_cache_settings()
+            if self.change_host_type_required():
+                self.update_host_type()
+            if self.change_name_required():
+                self.update_name()
+            if self.change_login_banner_message_required():
+                self.update_login_banner_message()
+            if self.change_controller_shelf_id_required():
+                self.update_controller_shelf_id()
+
+        current_configuration = self.get_current_configuration(update=True)
+        self.module.exit_json(changed=change_required,
+                              cache_settings=current_configuration["cache_settings"],
+                              default_host_type_index=current_configuration["default_host_type_index"],
+                              automatic_load_balancing="enabled" if current_configuration["autoload_enabled"] else "disabled",
+                              host_connectivity_reporting="enabled" if current_configuration["host_connectivity_reporting_enabled"] else "disabled",
+                              array_name=current_configuration["name"],
+                              login_banner_message=current_configuration["login_banner_message"],
+                              controller_shelf_id=current_configuration["controller_shelf_id"])
+
+
+def main():
+    global_settings = NetAppESeriesGlobalSettings()
+    global_settings.update()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_host.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_host.py
new file mode 100644
index 000000000..0da00fcd0
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_host.py
@@ -0,0 +1,490 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_host
+short_description: NetApp E-Series manage hosts
+description: Create, update, or remove hosts on NetApp E-Series storage arrays.
+author:
+    - Kevin Hulquest (@hulquest)
+    - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.santricity_doc
+options:
+    name:
+        description:
+            - If the host does not yet exist, the label/name to assign at creation time.
+            - If the host already exists, this will be used to uniquely identify the host to make any required changes.
+        type: str
+        required: True
+        aliases:
+            - label
+    state:
+        description:
+            - Set to absent to remove an existing host.
+            - Set to present to modify or create a new host definition.
+        type: str
+        choices:
+            - absent
+            - present
+        default: present
+    host_type:
+        description:
+            - Host type includes operating system and multipath considerations.
+            - If not specified, the default host type will be utilized.
Default host type can be set using M(netapp_eseries.santricity.na_santricity_global). + - For storage array specific options see M(netapp_eseries.santricity.na_santricity_facts). + - All values are case-insensitive. + - AIX MPIO - The Advanced Interactive Executive (AIX) OS and the native MPIO driver + - AVT 4M - Silicon Graphics, Inc. (SGI) proprietary multipath driver + - HP-UX - The HP-UX OS with native multipath driver + - Linux ATTO - The Linux OS and the ATTO Technology, Inc. driver (must use ATTO FC HBAs) + - Linux DM-MP - The Linux OS and the native DM-MP driver + - Linux Pathmanager - The Linux OS and the SGI proprietary multipath driver + - Mac - The Mac OS and the ATTO Technology, Inc. driver + - ONTAP - FlexArray + - Solaris 11 or later - The Solaris 11 or later OS and the native MPxIO driver + - Solaris 10 or earlier - The Solaris 10 or earlier OS and the native MPxIO driver + - SVC - IBM SAN Volume Controller + - VMware - ESXi OS + - Windows - Windows Server OS and Windows MPIO with a DSM driver + - Windows Clustered - Clustered Windows Server OS and Windows MPIO with a DSM driver + - Windows ATTO - Windows OS and the ATTO Technology, Inc. driver + type: str + required: False + aliases: + - host_type_index + ports: + description: + - A list of host ports you wish to associate with the host. + - Host ports are uniquely identified by their WWN or IQN. Their assignments to a particular host are + uniquely identified by a label and these must be unique. + type: list + required: False + suboptions: + type: + description: + - The interface type of the port to define. + - Acceptable choices depend on the capabilities of the target hardware/software platform. + required: true + choices: + - iscsi + - sas + - fc + - ib + - nvmeof + label: + description: + - A unique label to assign to this port assignment. + required: true + port: + description: + - The WWN or IQN of the hostPort to assign to this port definition. + required: true + force_port: + description: + - Allow ports that are already assigned to be re-assigned to your current host + required: false + type: bool +""" + +EXAMPLES = """ + - name: Define or update an existing host named "Host1" + na_santricity_host: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + name: "Host1" + state: present + host_type_index: Linux DM-MP + ports: + - type: "iscsi" + label: "PORT_1" + port: "iqn.1996-04.de.suse:01:56f86f9bd1fe" + - type: "fc" + label: "FC_1" + port: "10:00:FF:7C:FF:FF:FF:01" + - type: "fc" + label: "FC_2" + port: "10:00:FF:7C:FF:FF:FF:00" + + - name: Ensure a host named "Host2" doesn"t exist + na_santricity_host: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + name: "Host2" + state: absent +""" + +RETURN = """ +msg: + description: + - A user-readable description of the actions performed. + returned: on success + type: str + sample: The host has been created. 
+
+id:
+    description:
+        - The unique identifier of the host on the E-Series storage-system.
+    returned: on success when state=present
+    type: str
+    sample: 00000000600A098000AAC0C3003004700AD86A52
+ssid:
+    description:
+        - The unique identifier of the E-Series storage-system with the current api.
+    returned: on success
+    type: str
+    sample: 1
+api_url:
+    description:
+        - The url of the API that this request was processed by.
+    returned: on success
+    type: str
+    sample: https://webservices.example.com:8443
+"""
+import re
+
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+
+
+class NetAppESeriesHost(NetAppESeriesModule):
+    PORT_TYPES = ["iscsi", "sas", "fc", "ib", "nvmeof"]
+
+    def __init__(self):
+        ansible_options = dict(state=dict(type="str", default="present", choices=["absent", "present"]),
+                               ports=dict(type="list", required=False),
+                               force_port=dict(type="bool", default=False),
+                               name=dict(type="str", required=True, aliases=["label"]),
+                               host_type=dict(type="str", required=False, aliases=["host_type_index"]))
+
+        super(NetAppESeriesHost, self).__init__(ansible_options=ansible_options,
+                                                web_services_version="02.00.0000.0000",
+                                                supports_check_mode=True)
+
+        self.check_mode = self.module.check_mode
+        args = self.module.params
+        self.ports = args["ports"]
+        self.force_port = args["force_port"]
+        self.name = args["name"]
+        self.state = args["state"]
+
+        self.post_body = dict()
+        self.all_hosts = list()
+        self.host_obj = dict()
+        self.new_ports = list()
+        self.ports_for_update = list()
+        self.ports_for_removal = list()
+
+        # Update host type with the corresponding index
+        host_type = args["host_type"]
+        if host_type:
+            host_type = host_type.lower()
+            if host_type in [key.lower() for key in list(self.HOST_TYPE_INDEXES.keys())]:
+                self.host_type_index = self.HOST_TYPE_INDEXES[host_type]
+            elif host_type.isdigit():
+                self.host_type_index = int(args["host_type"])
+            else:
+                self.module.fail_json(msg="host_type must be either a host type name or a host type index as found in the documentation.")
+        else:
+            self.host_type_index = None
+
+        if not self.url.endswith("/"):
+            self.url += "/"
+
+        # Fix port representation if they are provided with colons
+        if self.ports is not None:
+            for port in self.ports:
+                port["type"] = port["type"].lower()
+                port["port"] = port["port"].lower()
+
+                if port["type"] not in self.PORT_TYPES:
+                    self.module.fail_json(msg="Invalid port type! Port interface type must be one of [%s]." % ", ".join(self.PORT_TYPES))
+
+                # Determine whether address is a 16-byte WWPN and, if so, remove the colons and any 0x prefix
+                if re.match(r"^(0x)?[0-9a-f]{16}$", port["port"].replace(":", "")):
+                    port["port"] = port["port"].replace(":", '').replace("0x", "")
+
+                    if port["type"] == "ib":
+                        port["port"] = "0" * (32 - len(port["port"])) + port["port"]
+
+    @property
+    def default_host_type(self):
+        """Return the default host type index."""
+        try:
+            rc, default_index = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/defaultHostTypeIndex" % self.ssid)
+            return default_index[0]
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve default host type index. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+    @property
+    def valid_host_type(self):
+        host_types = None
+        try:
+            rc, host_types = self.request("storage-systems/%s/host-types" % self.ssid)
+        except Exception as err:
+            self.module.fail_json(msg="Failed to get host types. Array Id [%s]. Error [%s]."
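+                                      # Hedged example of the host-types payload consumed here (keys match the filter
+                                      # applied below; values are assumed): [{"index": 28, "code": "LnxALUA", ...}, ...]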
+                                      % (self.ssid, to_native(err)))
+
+        try:
+            match = list(filter(lambda host_type: host_type["index"] == self.host_type_index, host_types))[0]
+            return True
+        except IndexError:
+            self.module.fail_json(msg="There is no host type with index %s" % self.host_type_index)
+
+    def check_port_types(self):
+        """Check to see whether the port interface types are available on the storage system."""
+        try:
+            rc, interfaces = self.request("storage-systems/%s/interfaces?channelType=hostside" % self.ssid)
+
+            for port in self.ports:
+                for interface in interfaces:
+
+                    # Check for IB iSER
+                    if port["type"] == "ib" and "iqn" in port["port"]:
+                        if ((interface["ioInterfaceTypeData"]["interfaceType"] == "iscsi" and
+                                interface["ioInterfaceTypeData"]["iscsi"]["interfaceData"]["type"] == "infiniband" and
+                                interface["ioInterfaceTypeData"]["iscsi"]["interfaceData"]["infinibandData"]["isIser"]) or
+                                (interface["ioInterfaceTypeData"]["interfaceType"] == "ib" and
+                                 interface["ioInterfaceTypeData"]["ib"]["isISERSupported"])):
+                            port["type"] = "iscsi"
+                            break
+                    # Check for NVMe
+                    elif (port["type"] == "nvmeof" and "commandProtocolPropertiesList" in interface and
+                          "commandProtocolProperties" in interface["commandProtocolPropertiesList"] and
+                          interface["commandProtocolPropertiesList"]["commandProtocolProperties"]):
+                        if interface["commandProtocolPropertiesList"]["commandProtocolProperties"][0]["commandProtocol"] == "nvme":
+                            break
+                    # Check SAS, FC, iSCSI
+                    elif ((port["type"] == "fc" and interface["ioInterfaceTypeData"]["interfaceType"] == "fibre") or
+                          (port["type"] == interface["ioInterfaceTypeData"]["interfaceType"])):
+                        break
+                else:
+                    # self.module.fail_json(msg="Invalid port type! Type [%s]. Port [%s]." % (port["type"], port["label"]))
+                    self.module.warn("Port type not found in hostside interfaces! Type [%s]. Port [%s]." % (port["type"], port["label"]))
+        except Exception as error:
+            # For older versions of web services
+            for port in self.ports:
+                if port["type"] == "ib" and "iqn" in port["port"]:
+                    port["type"] = "iscsi"
+                    break
+
+    def assigned_host_ports(self, apply_unassigning=False):
+        """Determine if the hostPorts requested have already been assigned and return list of required used ports."""
+        used_host_ports = {}
+        for host in self.all_hosts:
+            if host["label"].lower() != self.name.lower():
+                for host_port in host["hostSidePorts"]:
+
+                    # Compare expected ports with those from other hosts definitions.
+                    for port in self.ports:
+                        if port["port"] == host_port["address"] or port["label"].lower() == host_port["label"].lower():
+                            if not self.force_port:
+                                self.module.fail_json(msg="Port label or address is already used and force_port option is set to false!")
+                            else:
+                                # Determine port reference
+                                port_ref = [port["hostPortRef"] for port in host["ports"]
+                                            if port["hostPortName"] == host_port["address"]]
+                                port_ref.extend([port["initiatorRef"] for port in host["initiators"]
+                                                 if port["nodeName"]["iscsiNodeName"] == host_port["address"]])
+
+                                # Create dictionary of hosts containing list of port references
+                                if host["hostRef"] not in used_host_ports.keys():
+                                    used_host_ports.update({host["hostRef"]: port_ref})
+                                else:
+                                    used_host_ports[host["hostRef"]].extend(port_ref)
+
+        # Unassign assigned ports
+        if apply_unassigning:
+            for host_ref in used_host_ports.keys():
+                try:
+                    rc, resp = self.request("storage-systems/%s/hosts/%s" % (self.ssid, host_ref), method="POST",
+                                            data={"portsToRemove": used_host_ports[host_ref]})
+                except Exception as err:
+                    self.module.fail_json(msg="Failed to unassign host port. Host Id [%s]. Array Id [%s]. Ports [%s]. Error [%s]."
+                                              % (self.host_obj["id"], self.ssid, used_host_ports[host_ref], to_native(err)))
+
+    @property
+    def host_exists(self):
+        """Determine if the requested host exists.
+        As a side effect, set the full list of defined hosts in "all_hosts", and the target host in "host_obj".
+        """
+        match = False
+        all_hosts = list()
+
+        try:
+            rc, all_hosts = self.request("storage-systems/%s/hosts" % self.ssid)
+        except Exception as err:
+            self.module.fail_json(msg="Failed to determine host existence. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+        # Augment the host objects
+        for host in all_hosts:
+            for port in host["hostSidePorts"]:
+                port["type"] = port["type"].lower()
+                port["address"] = port["address"].lower()
+
+            # Augment hostSidePorts with their ID (this is an omission in the API)
+            ports = dict((port["label"], port["id"]) for port in host["ports"])
+            ports.update(dict((port["label"], port["id"]) for port in host["initiators"]))
+
+            for host_side_port in host["hostSidePorts"]:
+                if host_side_port["label"] in ports:
+                    host_side_port["id"] = ports[host_side_port["label"]]
+
+            if host["label"].lower() == self.name.lower():
+                self.host_obj = host
+                match = True
+
+        self.all_hosts = all_hosts
+        return match
+
+    @property
+    def needs_update(self):
+        """Determine whether we need to update the Host object.
+        As a side effect, we will set the ports that we need to update (portsForUpdate), and the ports we need to add
+        (newPorts), on self.
+        """
+        changed = False
+        if self.host_obj["hostTypeIndex"] != self.host_type_index:
+            changed = True
+
+        current_host_ports = dict((port["id"], {"type": port["type"], "port": port["address"], "label": port["label"]})
+                                  for port in self.host_obj["hostSidePorts"])
+
+        if self.ports:
+            for port in self.ports:
+                for current_host_port_id in current_host_ports.keys():
+                    if port == current_host_ports[current_host_port_id]:
+                        current_host_ports.pop(current_host_port_id)
+                        break
+
+                    elif port["port"] == current_host_ports[current_host_port_id]["port"]:
+                        if self.port_on_diff_host(port) and not self.force_port:
+                            self.module.fail_json(msg="The port you specified [%s] is associated with a different host."
+                                                      " Specify force_port as True or try a different port spec" % port)
+
+                        if (port["label"] != current_host_ports[current_host_port_id]["label"] or
+                                port["type"] != current_host_ports[current_host_port_id]["type"]):
+                            current_host_ports.pop(current_host_port_id)
+                            self.ports_for_update.append({"portRef": current_host_port_id, "port": port["port"],
+                                                          "label": port["label"], "hostRef": self.host_obj["hostRef"]})
+                            break
+                else:
+                    self.new_ports.append(port)
+
+        self.ports_for_removal = list(current_host_ports.keys())
+        changed = any([self.new_ports, self.ports_for_update, self.ports_for_removal, changed])
+        return changed
+
+    def port_on_diff_host(self, arg_port):
+        """Check whether the passed-in port is already present on a different host."""
+        for host in self.all_hosts:
+
+            # Only check "other" hosts
+            if host["name"].lower() != self.name.lower():
+                for port in host["hostSidePorts"]:
+
+                    # Check if the port label is found in the port dict list of each host
+                    if arg_port["label"].lower() == port["label"].lower() or arg_port["port"].lower() == port["address"].lower():
+                        return True
+        return False
+
+    def update_host(self):
+        self.post_body = {"name": self.name, "hostType": {"index": self.host_type_index}}
+
+        # Remove ports that need reassigning from their current host.
+ if self.ports: + self.assigned_host_ports(apply_unassigning=True) + self.post_body["portsToUpdate"] = self.ports_for_update + self.post_body["portsToRemove"] = self.ports_for_removal + self.post_body["ports"] = self.new_ports + + if not self.check_mode: + try: + rc, self.host_obj = self.request("storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj["id"]), method="POST", + data=self.post_body, ignore_errors=True) + except Exception as err: + self.module.fail_json(msg="Failed to update host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + self.module.exit_json(changed=True) + + def create_host(self): + # Remove ports that need reassigning from their current host. + self.assigned_host_ports(apply_unassigning=True) + + # needs_reassignment = False + post_body = dict(name=self.name, + hostType=dict(index=self.host_type_index)) + + if self.ports: + post_body.update(ports=self.ports) + + if not self.host_exists: + if not self.check_mode: + try: + rc, self.host_obj = self.request("storage-systems/%s/hosts" % self.ssid, method="POST", data=post_body, ignore_errors=True) + except Exception as err: + self.module.fail_json(msg="Failed to create host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + else: + payload = self.build_success_payload(self.host_obj) + self.module.exit_json(changed=False, msg="Host already exists. Id [%s]. Host [%s]." % (self.ssid, self.name), **payload) + + payload = self.build_success_payload(self.host_obj) + self.module.exit_json(changed=True, msg="Host created.") + + def remove_host(self): + try: + rc, resp = self.request("storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj["id"]), method="DELETE") + except Exception as err: + self.module.fail_json(msg="Failed to remove host. Host[%s]. Array Id [%s]. Error [%s]." 
% (self.host_obj["id"], self.ssid, to_native(err))) + + def build_success_payload(self, host=None): + keys = [] # ["id"] + + if host: + result = dict((key, host[key]) for key in keys) + else: + result = dict() + result["ssid"] = self.ssid + result["api_url"] = self.url + return result + + def apply(self): + if self.state == "present": + if self.host_type_index is None: + self.host_type_index = self.default_host_type + + self.check_port_types() + if self.host_exists: + if self.needs_update and self.valid_host_type: + self.update_host() + else: + payload = self.build_success_payload(self.host_obj) + self.module.exit_json(changed=False, msg="Host already present; no changes required.", **payload) + elif self.valid_host_type: + self.create_host() + else: + payload = self.build_success_payload() + if self.host_exists: + self.remove_host() + self.module.exit_json(changed=True, msg="Host removed.", **payload) + else: + self.module.exit_json(changed=False, msg="Host already absent.", **payload) + + +def main(): + host = NetAppESeriesHost() + host.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_hostgroup.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_hostgroup.py new file mode 100644 index 000000000..7b8a9e2aa --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_hostgroup.py @@ -0,0 +1,279 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: na_santricity_hostgroup +short_description: NetApp E-Series manage array host groups +author: + - Kevin Hulquest (@hulquest) + - Nathan Swartz (@ndswartz) +description: Create, update or destroy host groups on a NetApp E-Series storage array. +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.santricity_doc +options: + state: + description: + - Whether the specified host group should exist or not. + type: str + choices: ["present", "absent"] + default: present + name: + description: + - Name of the host group to manage + type: str + required: false + hosts: + description: + - List of host names/labels to add to the group + type: list + required: false +""" +EXAMPLES = """ + - name: Configure Hostgroup + na_santricity_hostgroup: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + state: present + name: example_hostgroup + hosts: + - host01 + - host02 +""" +RETURN = """ +clusterRef: + description: The unique identification value for this object. Other objects may use this reference value to refer to the cluster. + returned: always except when state is absent + type: str + sample: "3233343536373839303132333100000000000000" +confirmLUNMappingCreation: + description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping + will alter the volume access rights of other clusters, in addition to this one. + returned: always + type: bool + sample: false +hosts: + description: A list of the hosts that are part of the host group after all operations. 
+ returned: always except when state is absent + type: list + sample: ["HostA","HostB"] +id: + description: The id number of the hostgroup + returned: always except when state is absent + type: str + sample: "3233343536373839303132333100000000000000" +isSAControlled: + description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false, + indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings. + returned: always except when state is absent + type: bool + sample: false +label: + description: The user-assigned, descriptive label string for the cluster. + returned: always + type: str + sample: "MyHostGroup" +name: + description: same as label + returned: always except when state is absent + type: str + sample: "MyHostGroup" +protectionInformationCapableAccessMethod: + description: This field is true if the host has a PI capable access method. + returned: always except when state is absent + type: bool + sample: true +""" +from ansible.module_utils._text import to_native +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata, request + + +class NetAppESeriesHostGroup(NetAppESeriesModule): + EXPANSION_TIMEOUT_SEC = 10 + DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11 + + def __init__(self): + version = "02.00.0000.0000" + ansible_options = dict( + state=dict(choices=["present", "absent"], type="str", default="present"), + name=dict(required=True, type="str"), + hosts=dict(required=False, type="list")) + super(NetAppESeriesHostGroup, self).__init__(ansible_options=ansible_options, + web_services_version=version, + supports_check_mode=True) + + args = self.module.params + self.state = args["state"] + self.name = args["name"] + self.hosts_list = args["hosts"] + + self.current_host_group = None + self.hosts_cache = None + + @property + def hosts(self): + """Retrieve a list of host reference identifiers should be associated with the host group.""" + if self.hosts_cache is None: + self.hosts_cache = [] + existing_hosts = [] + + if self.hosts_list: + try: + rc, existing_hosts = self.request("storage-systems/%s/hosts" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve hosts information. Array id [%s]. Error[%s]." + % (self.ssid, to_native(error))) + + for host in self.hosts_list: + for existing_host in existing_hosts: + if host in existing_host["id"] or host.lower() in existing_host["name"].lower(): + self.hosts_cache.append(existing_host["id"]) + break + else: + self.module.fail_json(msg="Expected host does not exist. Array id [%s]. Host [%s]." % (self.ssid, host)) + self.hosts_cache.sort() + return self.hosts_cache + + @property + def host_groups(self): + """Retrieve a list of existing host groups.""" + host_groups = [] + hosts = [] + try: + rc, host_groups = self.request("storage-systems/%s/host-groups" % self.ssid) + rc, hosts = self.request("storage-systems/%s/hosts" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve host group information. Array id [%s]. Error[%s]." 
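+                                      # Hedged illustration (keys from the list comprehension below; values assumed):
+                                      # host-groups entries look like {"clusterRef": "85000000600A...", "name": "backup"},
+                                      # and each host's "clusterRef" ties it back to its containing group.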
+                                          % (self.ssid, to_native(error)))
+
+        host_groups = [{"id": group["clusterRef"], "name": group["name"]} for group in host_groups]
+        for group in host_groups:
+            hosts_ids = []
+            for host in hosts:
+                if group["id"] == host["clusterRef"]:
+                    hosts_ids.append(host["hostRef"])
+            group.update({"hosts": hosts_ids})
+
+        return host_groups
+
+    @property
+    def current_hosts_in_host_group(self):
+        """Retrieve the current hosts associated with the current hostgroup."""
+        current_hosts = []
+        for group in self.host_groups:
+            if group["name"] == self.name:
+                current_hosts = group["hosts"]
+                break
+
+        return current_hosts
+
+    def unassign_hosts(self, host_list=None):
+        """Unassign hosts from host group."""
+        if host_list is None:
+            host_list = self.current_host_group["hosts"]
+
+        for host_id in host_list:
+            try:
+                rc, resp = self.request("storage-systems/%s/hosts/%s/move" % (self.ssid, host_id),
+                                        method="POST", data={"group": "0000000000000000000000000000000000000000"})
+            except Exception as error:
+                self.module.fail_json(msg="Failed to unassign hosts from host group. Array id [%s]. Host id [%s]."
+                                          " Error[%s]." % (self.ssid, host_id, to_native(error)))
+
+    def delete_host_group(self, unassign_hosts=True):
+        """Delete host group."""
+        if unassign_hosts:
+            self.unassign_hosts()
+
+        try:
+            rc, resp = self.request("storage-systems/%s/host-groups/%s" % (self.ssid, self.current_host_group["id"]), method="DELETE")
+        except Exception as error:
+            self.module.fail_json(msg="Failed to delete host group. Array id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+    def create_host_group(self):
+        """Create host group."""
+        data = {"name": self.name, "hosts": self.hosts}
+
+        response = None
+        try:
+            rc, response = self.request("storage-systems/%s/host-groups" % self.ssid, method="POST", data=data)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to create host group. Array id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+        return response
+
+    def update_host_group(self):
+        """Update host group."""
+        data = {"name": self.name, "hosts": self.hosts}
+
+        # Unassign hosts that should not be part of the hostgroup
+        desired_host_ids = self.hosts
+        for host in self.current_hosts_in_host_group:
+            if host not in desired_host_ids:
+                self.unassign_hosts([host])
+
+        update_response = None
+        try:
+            rc, update_response = self.request("storage-systems/%s/host-groups/%s" % (self.ssid, self.current_host_group["id"]), method="POST", data=data)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to update host group. Array id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+        return update_response
+
+    def apply(self):
+        """Apply desired host group state to the storage array."""
+        changes_required = False
+
+        # Search for existing host group match
+        for group in self.host_groups:
+            if group["name"] == self.name:
+                self.current_host_group = group
+                self.current_host_group["hosts"].sort()
+                break
+
+        # Determine whether changes are required
+        if self.state == "present":
+            if self.current_host_group:
+                if self.hosts and self.hosts != self.current_host_group["hosts"]:
+                    changes_required = True
+            else:
+                if not self.name:
+                    self.module.fail_json(msg="The option name must be supplied when creating a new host group. Array id [%s]." % self.ssid)
+                changes_required = True
+
+        elif self.current_host_group:
+            changes_required = True
+
+        # Apply any necessary changes
+        msg = "No changes required."
+        if changes_required and not self.module.check_mode:
+ if self.state == "present": + if self.current_host_group: + if self.hosts != self.current_host_group["hosts"]: + msg = self.update_host_group() + else: + msg = self.create_host_group() + + elif self.current_host_group: + self.delete_host_group() + msg = "Host group deleted. Array Id [%s]. Host group [%s]." % (self.ssid, self.current_host_group["name"]) + + self.module.exit_json(msg=msg, changed=changes_required) + + +def main(): + hostgroup = NetAppESeriesHostGroup() + hostgroup.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ib_iser_interface.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ib_iser_interface.py new file mode 100644 index 000000000..364bef73f --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ib_iser_interface.py @@ -0,0 +1,257 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: na_santricity_ib_iser_interface +short_description: NetApp E-Series manage InfiniBand iSER interface configuration +description: + - Configure settings of an E-Series InfiniBand iSER interface IPv4 address configuration. +author: + - Michael Price (@lmprice) + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.santricity_doc +options: + controller: + description: + - The controller that owns the port you want to configure. + - Controller names are presented alphabetically, with the first controller as A, the second as B, and so on. + - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard limitation and could change in the future. + type: str + required: true + choices: + - A + - B + channel: + description: + - The InfiniBand HCA port you wish to modify. + - Ports start left to right and start with 1. + type: int + required: true + address: + description: + - The IPv4 address to assign to the interface. + - Should be specified in xx.xx.xx.xx form. + type: str + required: true +notes: + - Check mode is supported. +""" + +EXAMPLES = """ + - name: Configure the first port on the A controller with a static IPv4 address + na_santricity_ib_iser_interface: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + controller: "A" + channel: "1" + address: "192.168.1.100" +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The interface settings have been updated. +enabled: + description: + - Indicates whether IPv4 connectivity has been enabled or disabled. + - This does not necessarily indicate connectivity. If dhcp was enabled without a dhcp server, for instance, + it is unlikely that the configuration will actually be valid. 
+ returned: on success + sample: True + type: bool +""" +import re + +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule +from ansible.module_utils._text import to_native + + +class NetAppESeriesIbIserInterface(NetAppESeriesModule): + def __init__(self): + ansible_options = dict(controller=dict(type="str", required=True, choices=["A", "B"]), + channel=dict(type="int"), + address=dict(type="str", required=True)) + + super(NetAppESeriesIbIserInterface, self).__init__(ansible_options=ansible_options, + web_services_version="02.00.0000.0000", + supports_check_mode=True) + + args = self.module.params + self.controller = args["controller"] + self.channel = args["channel"] + self.address = args["address"] + self.check_mode = self.module.check_mode + + self.get_target_interface_cache = None + + # A relatively primitive regex to validate that the input is formatted like a valid ip address + address_regex = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$") + if self.address and not address_regex.match(self.address): + self.module.fail_json(msg="An invalid ip address was provided for address.") + + def get_interfaces(self): + """Retrieve and filter all hostside interfaces for IB iSER.""" + ifaces = [] + try: + rc, ifaces = self.request("storage-systems/%s/interfaces?channelType=hostside" % self.ssid) + except Exception as err: + self.module.fail_json(msg="Failed to retrieve defined host interfaces. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + # Filter out non-ib-iser interfaces + ib_iser_ifaces = [] + for iface in ifaces: + if ((iface["ioInterfaceTypeData"]["interfaceType"] == "iscsi" and + iface["ioInterfaceTypeData"]["iscsi"]["interfaceData"]["type"] == "infiniband" and + iface["ioInterfaceTypeData"]["iscsi"]["interfaceData"]["infinibandData"]["isIser"]) or + (iface["ioInterfaceTypeData"]["interfaceType"] == "ib" and + iface["ioInterfaceTypeData"]["ib"]["isISERSupported"])): + ib_iser_ifaces.append(iface) + + if not ib_iser_ifaces: + self.module.fail_json(msg="Failed to detect any InfiniBand iSER interfaces! Array [%s] - %s." % self.ssid) + + return ib_iser_ifaces + + def get_controllers(self): + """Retrieve a mapping of controller labels to their references + { + 'A': '070000000000000000000001', + 'B': '070000000000000000000002', + } + :return: the controllers defined on the system + """ + controllers = list() + try: + rc, controllers = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/id" % self.ssid) + except Exception as err: + self.module.fail_json(msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + controllers.sort() + + controllers_dict = {} + i = ord('A') + for controller in controllers: + label = chr(i) + controllers_dict[label] = controller + i += 1 + + return controllers_dict + + def get_ib_link_status(self): + """Determine the infiniband link status. Returns dictionary keyed by interface reference number.""" + link_statuses = {} + try: + rc, result = self.request("storage-systems/%s/hardware-inventory" % self.ssid) + for link in result["ibPorts"]: + link_statuses.update({link["channelPortRef"]: link["linkState"]}) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve ib link status information! Array Id [%s]. Error [%s]." 
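+                                          # Resulting shape of link_statuses built above (keys from the update call;
+                                          # the values are assumed): {"<channelPortRef>": "up", "<channelPortRef>": "down", ...}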
+ % (self.ssid, to_native(error))) + + return link_statuses + + def get_target_interface(self): + """Search for the selected IB iSER interface""" + if self.get_target_interface_cache is None: + ifaces = self.get_interfaces() + ifaces_status = self.get_ib_link_status() + controller_id = self.get_controllers()[self.controller] + + controller_ifaces = [] + for iface in ifaces: + if iface["ioInterfaceTypeData"]["interfaceType"] == "iscsi" and iface["controllerRef"] == controller_id: + controller_ifaces.append([iface["ioInterfaceTypeData"]["iscsi"]["channel"], iface, + ifaces_status[iface["ioInterfaceTypeData"]["iscsi"]["channelPortRef"]]]) + elif iface["ioInterfaceTypeData"]["interfaceType"] == "ib" and iface["controllerRef"] == controller_id: + controller_ifaces.append([iface["ioInterfaceTypeData"]["ib"]["channel"], iface, + iface["ioInterfaceTypeData"]["ib"]["linkState"]]) + + sorted_controller_ifaces = sorted(controller_ifaces) + if self.channel < 1 or self.channel > len(controller_ifaces): + status_msg = ", ".join(["%s (link %s)" % (index + 1, values[2]) + for index, values in enumerate(sorted_controller_ifaces)]) + self.module.fail_json(msg="Invalid controller %s HCA channel. Available channels: %s, Array Id [%s]." + % (self.controller, status_msg, self.ssid)) + + self.get_target_interface_cache = sorted_controller_ifaces[self.channel - 1][1] + return self.get_target_interface_cache + + def is_change_required(self): + """Determine whether change is required.""" + changed_required = False + iface = self.get_target_interface() + if (iface["ioInterfaceTypeData"]["interfaceType"] == "iscsi" and + iface["ioInterfaceTypeData"]["iscsi"]["ipv4Data"]["ipv4AddressData"]["ipv4Address"] != self.address): + changed_required = True + + elif iface["ioInterfaceTypeData"]["interfaceType"] == "ib" and iface["ioInterfaceTypeData"]["ib"]["isISERSupported"]: + for properties in iface["commandProtocolPropertiesList"]["commandProtocolProperties"]: + if (properties["commandProtocol"] == "scsi" and + properties["scsiProperties"]["scsiProtocolType"] == "iser" and + properties["scsiProperties"]["iserProperties"]["ipv4Data"]["ipv4AddressData"]["ipv4Address"] != self.address): + changed_required = True + + return changed_required + + def make_request_body(self): + iface = self.get_target_interface() + body = {"iscsiInterface": iface["ioInterfaceTypeData"][iface["ioInterfaceTypeData"]["interfaceType"]]["id"], + "settings": {"tcpListenPort": [], + "ipv4Address": [self.address], + "ipv4SubnetMask": [], + "ipv4GatewayAddress": [], + "ipv4AddressConfigMethod": [], + "maximumFramePayloadSize": [], + "ipv4VlanId": [], + "ipv4OutboundPacketPriority": [], + "ipv4Enabled": [], + "ipv6Enabled": [], + "ipv6LocalAddresses": [], + "ipv6RoutableAddresses": [], + "ipv6PortRouterAddress": [], + "ipv6AddressConfigMethod": [], + "ipv6OutboundPacketPriority": [], + "ipv6VlanId": [], + "ipv6HopLimit": [], + "ipv6NdReachableTime": [], + "ipv6NdRetransmitTime": [], + "ipv6NdStaleTimeout": [], + "ipv6DuplicateAddressDetectionAttempts": [], + "maximumInterfaceSpeed": []}} + return body + + def update(self): + """Make any necessary updates.""" + update_required = self.is_change_required() + if update_required and not self.check_mode: + try: + rc, result = self.request("storage-systems/%s/symbol/setIscsiInterfaceProperties" + % self.ssid, method="POST", data=self.make_request_body()) + except Exception as error: + self.module.fail_json(msg="Failed to modify the interface! Array Id [%s]. Error [%s]." 
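+                                          # A reading of the payload built in make_request_body() (not documented API
+                                          # behavior): the empty lists appear to leave the corresponding settings
+                                          # unchanged, so only ipv4Address is actually modified by this call.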
+ % (self.ssid, to_native(error))) + self.module.exit_json(msg="The interface settings have been updated.", changed=update_required) + + self.module.exit_json(msg="No changes were required.", changed=update_required) + + +def main(): + ib_iser = NetAppESeriesIbIserInterface() + ib_iser.update() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_interface.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_interface.py new file mode 100644 index 000000000..e85e8b68c --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_interface.py @@ -0,0 +1,423 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: na_santricity_iscsi_interface +short_description: NetApp E-Series manage iSCSI interface configuration +description: + - Configure settings of an E-Series iSCSI interface +author: + - Michael Price (@lmprice) + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.santricity_doc +options: + controller: + description: + - The controller that owns the port you want to configure. + - Controller names are presented alphabetically, with the first controller as A, + the second as B, and so on. + - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard + limitation and could change in the future. + type: str + required: true + choices: + - A + - B + port: + description: + - The controller iSCSI baseboard or HIC port to modify. + - Determine the port by counting, starting from one, the controller's iSCSI ports left to right. Count the + baseboard and then the HIC ports. + type: int + required: true + state: + description: + - When enabled, the provided configuration will be utilized. + - When disabled, the IPv4 configuration will be cleared and IPv4 connectivity disabled. + type: str + choices: + - enabled + - disabled + default: enabled + address: + description: + - The IPv4 address to assign to the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + type: str + required: false + subnet_mask: + description: + - The subnet mask to utilize for the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + type: str + gateway: + description: + - The IPv4 gateway address to utilize for the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + type: str + required: false + config_method: + description: + - The configuration method type to use for this interface. + - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway). + type: str + choices: + - dhcp + - static + default: dhcp + required: false + mtu: + description: + - The maximum transmission units (MTU), in bytes. + - This allows you to configure a larger value for the MTU, in order to enable jumbo frames + (any value > 1500). + - Generally, it is necessary to have your host, switches, and other components not only support jumbo + frames, but also have it configured properly. Therefore, unless you know what you're doing, it's best to + leave this at the default. 
+ type: int + default: 1500 + required: false + aliases: + - max_frame_size + speed: + description: + - The option will change the interface port speed. + - Only supported speeds will be accepted and must be in the form [0-9]+[gm] (i.e. 25g) + - 'Down' interfaces will report 'Unknown' speed until they are set to an accepted network speed. + - Do not use this option when the port's speed is automatically configured as it will fail. See System + Manager for the port's capability. + type: str + required: false +notes: + - Check mode is supported. + - The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address + via dhcp, etc), can take seconds or minutes longer to take effect. + - This module will not be useful/usable on an E-Series system without any iSCSI interfaces. + - This module requires a Web Services API version of >= 1.3. +""" + +EXAMPLES = """ + - name: Configure the first port on the A controller with a static IPv4 address + na_santricity_iscsi_interface: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + port: "1" + controller: "A" + config_method: static + address: "192.168.1.100" + subnet_mask: "255.255.255.0" + gateway: "192.168.1.1" + speed: "25g" + + - name: Disable ipv4 connectivity for the second port on the B controller + na_santricity_iscsi_interface: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + port: "2" + controller: "B" + state: disabled + + - name: Enable jumbo frames for the first 4 ports on controller A + na_santricity_iscsi_interface: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + port: "{{ item }}" + controller: "A" + state: enabled + mtu: 9000 + config_method: dhcp + loop: + - 1 + - 2 + - 3 + - 4 +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The interface settings have been updated. +""" +import re + +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule +from ansible.module_utils._text import to_native + +def strip_interface_speed(speed): + """Converts symbol interface speeds to a more common notation. 
Example: 'speed10gig' -> '10g'""" + if isinstance(speed, list): + result = [re.match(r"speed[0-9]{1,3}[gm]", sp) for sp in speed] + result = [sp.group().replace("speed", "") if result else "unknown" for sp in result if sp] + result = ["auto" if re.match(r"auto", sp) else sp for sp in result] + else: + result = re.match(r"speed[0-9]{1,3}[gm]", speed) + result = result.group().replace("speed", "") if result else "unknown" + result = "auto" if re.match(r"auto", result.lower()) else result + return result + +class NetAppESeriesIscsiInterface(NetAppESeriesModule): + def __init__(self): + ansible_options = dict(controller=dict(type="str", required=True, choices=["A", "B"]), + port=dict(type="int", required=True), + state=dict(type="str", required=False, default="enabled", choices=["enabled", "disabled"]), + address=dict(type="str", required=False), + subnet_mask=dict(type="str", required=False), + gateway=dict(type="str", required=False), + config_method=dict(type="str", required=False, default="dhcp", choices=["dhcp", "static"]), + mtu=dict(type="int", default=1500, required=False, aliases=["max_frame_size"]), + speed=dict(type="str", required=False)) + + required_if = [["config_method", "static", ["address", "subnet_mask"]]] + super(NetAppESeriesIscsiInterface, self).__init__(ansible_options=ansible_options, + web_services_version="02.00.0000.0000", + required_if=required_if, + supports_check_mode=True) + + args = self.module.params + self.controller = args["controller"] + self.port = args["port"] + self.mtu = args["mtu"] + self.state = args["state"] + self.address = args["address"] + self.subnet_mask = args["subnet_mask"] + self.gateway = args["gateway"] + self.config_method = args["config_method"] + self.speed = args["speed"] + + self.check_mode = self.module.check_mode + self.post_body = dict() + self.controllers = list() + self.get_target_interface_cache = None + + if self.mtu < 1500 or self.mtu > 9000: + self.module.fail_json(msg="The provided mtu is invalid, it must be > 1500 and < 9000 bytes.") + + if self.config_method == "dhcp" and any([self.address, self.subnet_mask, self.gateway]): + self.module.fail_json(msg="A config_method of dhcp is mutually exclusive with the address," + " subnet_mask, and gateway options.") + + # A relatively primitive regex to validate that the input is formatted like a valid ip address + address_regex = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$") + + if self.address and not address_regex.match(self.address): + self.module.fail_json(msg="An invalid ip address was provided for address.") + + if self.subnet_mask and not address_regex.match(self.subnet_mask): + self.module.fail_json(msg="An invalid ip address was provided for subnet_mask.") + + if self.gateway and not address_regex.match(self.gateway): + self.module.fail_json(msg="An invalid ip address was provided for gateway.") + + self.get_host_board_id_cache = None + + @property + def interfaces(self): + ifaces = list() + try: + rc, ifaces = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/hostInterfaces" % self.ssid) + except Exception as err: + self.module.fail_json(msg="Failed to retrieve defined host interfaces. Array Id [%s]. Error [%s]." 
% (self.ssid, to_native(err))) + + # Filter out non-iSCSI interfaces + iscsi_interfaces = [] + for iface in [iface for iface in ifaces if iface["interfaceType"] == "iscsi"]: + if iface["iscsi"]["interfaceData"]["type"] == "ethernet": + iscsi_interfaces.append(iface) + + return iscsi_interfaces + + def get_host_board_id(self, iface_ref): + if self.get_host_board_id_cache is None: + try: + rc, iface_board_map_list = self.request("storage-systems/%s/graph/xpath-filter?query=/ioInterfaceHicMap" % self.ssid) + except Exception as err: + self.module.fail_json(msg="Failed to retrieve IO interface HIC mappings! Array Id [%s]." + " Error [%s]." % (self.ssid, to_native(err))) + + self.get_host_board_id_cache = dict() + for iface_board_map in iface_board_map_list: + self.get_host_board_id_cache.update({iface_board_map["interfaceRef"]: iface_board_map["hostBoardRef"]}) + + return self.get_host_board_id_cache[iface_ref] + + + def get_controllers(self): + """Retrieve a mapping of controller labels to their references + { + "A": "070000000000000000000001", + "B": "070000000000000000000002", + } + :return: the controllers defined on the system + """ + controllers = list() + try: + rc, controllers = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/id" % self.ssid) + except Exception as err: + self.module.fail_json(msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + controllers.sort() + + controllers_dict = {} + i = ord("A") + for controller in controllers: + label = chr(i) + controllers_dict[label] = controller + i += 1 + + return controllers_dict + + def get_target_interface(self): + """Retrieve the specific controller iSCSI interface.""" + if self.get_target_interface_cache is None: + ifaces = self.interfaces + + controller_ifaces = [] + for iface in ifaces: + if self.controllers[self.controller] == iface["iscsi"]["controllerId"]: + controller_ifaces.append([iface["iscsi"]["channel"], iface, iface["iscsi"]["interfaceData"]["ethernetData"]["linkStatus"]]) + + sorted_controller_ifaces = sorted(controller_ifaces) + if self.port < 1 or self.port > len(controller_ifaces): + status_msg = ", ".join(["%s (link %s)" % (index + 1, values[2]) for index, values in enumerate(sorted_controller_ifaces)]) + self.module.fail_json(msg="Invalid controller %s iSCSI port. Available ports: %s, Array Id [%s]." 
+ % (self.controller, status_msg, self.ssid)) + + self.get_target_interface_cache = sorted_controller_ifaces[self.port - 1][1] + return self.get_target_interface_cache + + def make_update_body(self, target_iface): + target_iface = target_iface["iscsi"] + body = dict(iscsiInterface=target_iface["id"]) + update_required = False + + if self.state == "enabled": + settings = dict() + if not target_iface["ipv4Enabled"]: + update_required = True + settings["ipv4Enabled"] = [True] + if self.mtu != target_iface["interfaceData"]["ethernetData"]["maximumFramePayloadSize"]: + update_required = True + settings["maximumFramePayloadSize"] = [self.mtu] + if self.config_method == "static": + ipv4Data = target_iface["ipv4Data"]["ipv4AddressData"] + + if ipv4Data["ipv4Address"] != self.address: + update_required = True + settings["ipv4Address"] = [self.address] + if ipv4Data["ipv4SubnetMask"] != self.subnet_mask: + update_required = True + settings["ipv4SubnetMask"] = [self.subnet_mask] + if self.gateway is not None and ipv4Data["ipv4GatewayAddress"] != self.gateway: + update_required = True + settings["ipv4GatewayAddress"] = [self.gateway] + + if target_iface["ipv4Data"]["ipv4AddressConfigMethod"] != "configStatic": + update_required = True + settings["ipv4AddressConfigMethod"] = ["configStatic"] + + elif target_iface["ipv4Data"]["ipv4AddressConfigMethod"] != "configDhcp": + update_required = True + settings.update(dict(ipv4Enabled=[True], + ipv4AddressConfigMethod=["configDhcp"])) + body["settings"] = settings + + else: + if target_iface["ipv4Enabled"]: + update_required = True + body["settings"] = dict(ipv4Enabled=[False]) + + return update_required, body + + def make_update_speed_body(self, target_iface): + target_iface = target_iface["iscsi"] + + # Check whether HIC speed should be changed. + if self.speed is None: + return False, dict() + else: + if target_iface["interfaceData"]["ethernetData"]["autoconfigSupport"]: + self.module.warn("This interface's HIC speed is autoconfigured!") + return False, dict() + if self.speed == strip_interface_speed(target_iface["interfaceData"]["ethernetData"]["currentInterfaceSpeed"]): + return False, dict() + + # Create a dictionary containing supported HIC speeds keyed by simplified value to the complete value (ie. {"10g": "speed10gig"}) + supported_speeds = dict() + for supported_speed in target_iface["interfaceData"]["ethernetData"]["supportedInterfaceSpeeds"]: + supported_speeds.update({strip_interface_speed(supported_speed): supported_speed}) + + if self.speed not in supported_speeds: + self.module.fail_json(msg="The host interface card (HIC) does not support the provided speed. Array Id [%s]. Supported speeds [%s]" % (self.ssid, ", ".join(supported_speeds.keys()))) + + body = {"settings": {"maximumInterfaceSpeed": [supported_speeds[self.speed]]}, "portsRef": {}} + hic_ref = self.get_host_board_id(target_iface["id"]) + if hic_ref == "0000000000000000000000000000000000000000": + body.update({"portsRef": {"portRefType": "baseBoard", "baseBoardRef": target_iface["id"], "hicRef": ""}}) + else: + body.update({"portsRef":{"portRefType": "hic", "hicRef": hic_ref, "baseBoardRef": ""}}) + + return True, body + + def update(self): + self.controllers = self.get_controllers() + if self.controller not in self.controllers: + self.module.fail_json(msg="The provided controller name is invalid. Valid controllers: %s." 
% ", ".join(self.controllers.keys())) + + iface_before = self.get_target_interface() + update_required, body = self.make_update_body(iface_before) + if update_required and not self.check_mode: + try: + rc, result = self.request("storage-systems/%s/symbol/setIscsiInterfaceProperties" % self.ssid, method="POST", data=body, ignore_errors=True) + # We could potentially retry this a few times, but it's probably a rare enough case (unless a playbook + # is cancelled mid-flight), that it isn't worth the complexity. + if rc == 422 and result["retcode"] in ["busy", "3"]: + self.module.fail_json(msg="The interface is currently busy (probably processing a previously requested modification request)." + " This operation cannot currently be completed. Array Id [%s]. Error [%s]." % (self.ssid, result)) + # Handle authentication issues, etc. + elif rc != 200: + self.module.fail_json(msg="Failed to modify the interface! Array Id [%s]. Error [%s]." % (self.ssid, to_native(result))) + # This is going to catch cases like a connection failure + except Exception as err: + self.module.fail_json(msg="Connection failure: we failed to modify the interface! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + update_speed_required, speed_body = self.make_update_speed_body(iface_before) + if update_speed_required and not self.check_mode: + try: + + rc, result = self.request("storage-systems/%s/symbol/setHostPortsAttributes?verboseErrorResponse=true" % self.ssid, method="POST", data=speed_body) + except Exception as err: + self.module.fail_json(msg="Failed to update host interface card speed. Array Id [%s], Body [%s]. Error [%s]." % (self.ssid, speed_body, to_native(err))) + + if update_required or update_speed_required: + self.module.exit_json(msg="The interface settings have been updated.", changed=True) + self.module.exit_json(msg="No changes were required.", changed=False) + + +def main(): + iface = NetAppESeriesIscsiInterface() + iface.update() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_target.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_target.py new file mode 100644 index 000000000..869c2d58e --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_iscsi_target.py @@ -0,0 +1,246 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: na_santricity_iscsi_target +short_description: NetApp E-Series manage iSCSI target configuration +description: + - Configure the settings of an E-Series iSCSI target +author: + - Michael Price (@lmprice) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.santricity_doc +options: + name: + description: + - The name/alias to assign to the iSCSI target. + - This alias is often used by the initiator software in order to make an iSCSI target easier to identify. + type: str + required: false + aliases: + - alias + ping: + description: + - Enable ICMP ping responses from the configured iSCSI ports. + type: bool + default: true + required: false + chap_secret: + description: + - Enable Challenge-Handshake Authentication Protocol (CHAP), utilizing this value as the password. + - When this value is specified, we will always trigger an update (changed=True). 
We have no way of verifying + whether or not the password has changed. + - The chap secret may only use ascii characters with values between 32 and 126 decimal. + - The chap secret must be no less than 12 characters, but no greater than 57 characters in length. + - The chap secret is cleared when not specified or an empty string. + type: str + required: false + aliases: + - chap + - password + unnamed_discovery: + description: + - When an initiator initiates a discovery session to an initiator port, it is considered an unnamed + discovery session if the iSCSI target iqn is not specified in the request. + - This option may be disabled to increase security if desired. + type: bool + default: true + required: false +notes: + - Check mode is supported. + - Some of the settings are dependent on the settings applied to the iSCSI interfaces. These can be configured using + M(na_santricity_iscsi_interface). + - This module requires a Web Services API version of >= 1.3. +""" + +EXAMPLES = """ + - name: Enable ping responses and unnamed discovery sessions for all iSCSI ports + na_santricity_iscsi_target: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + name: myTarget + ping: true + unnamed_discovery: true + + - name: Set the target alias and the CHAP secret + na_santricity_iscsi_target: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + name: myTarget + chap: password1234 +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The iSCSI target settings have been updated. +alias: + description: + - The alias assigned to the iSCSI target. + returned: on success + sample: myArray + type: str +iqn: + description: + - The iqn (iSCSI Qualified Name), assigned to the iSCSI target. 
+ returned: on success + sample: iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45 + type: str +""" +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule +from ansible.module_utils._text import to_native + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +class NetAppESeriesIscsiTarget(NetAppESeriesModule): + def __init__(self): + ansible_options = dict(name=dict(type="str", required=False, aliases=["alias"]), + ping=dict(type="bool", required=False, default=True), + chap_secret=dict(type="str", required=False, aliases=["chap", "password"], no_log=True), + unnamed_discovery=dict(type="bool", required=False, default=True)) + + super(NetAppESeriesIscsiTarget, self).__init__(ansible_options=ansible_options, + web_services_version="02.00.0000.0000", + supports_check_mode=True) + + args = self.module.params + + self.name = args["name"] + self.ping = args["ping"] + self.chap_secret = args["chap_secret"] + self.unnamed_discovery = args["unnamed_discovery"] + + self.check_mode = self.module.check_mode + self.post_body = dict() + self.controllers = list() + + if self.chap_secret: + if len(self.chap_secret) < 12 or len(self.chap_secret) > 57: + self.module.fail_json(msg="The provided CHAP secret is not valid, it must be between 12 and 57" + " characters in length.") + + for c in self.chap_secret: + ordinal = ord(c) + if ordinal < 32 or ordinal > 126: + self.module.fail_json(msg="The provided CHAP secret is not valid, it may only utilize ascii" + " characters with decimal values between 32 and 126.") + + @property + def target(self): + """Provide information on the iSCSI Target configuration + + Sample: + { + "alias": "myCustomName", + "ping": True, + "unnamed_discovery": True, + "chap": False, + "iqn": "iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45", + } + """ + target = dict() + try: + rc, data = self.request("storage-systems/%s/graph/xpath-filter?query=/storagePoolBundle/target" % self.ssid) + # This likely isn"t an iSCSI-enabled system + if not data: + self.module.fail_json(msg="This storage-system does not appear to have iSCSI interfaces. Array Id [%s]." % self.ssid) + + data = data[0] + chap = any([auth for auth in data["configuredAuthMethods"]["authMethodData"] if auth["authMethod"] == "chap"]) + target.update(dict(alias=data["alias"]["iscsiAlias"], iqn=data["nodeName"]["iscsiNodeName"], chap=chap)) + + rc, data = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/iscsiEntityData" % self.ssid) + + data = data[0] + target.update(dict(ping=data["icmpPingResponseEnabled"], unnamed_discovery=data["unnamedDiscoverySessionsEnabled"])) + + except Exception as err: + self.module.fail_json(msg="Failed to retrieve the iSCSI target information. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + return target + + def apply_iscsi_settings(self): + """Update the iSCSI target alias and CHAP settings""" + update = False + target = self.target + + body = dict() + + if self.name is not None and self.name != target["alias"]: + update = True + body["alias"] = self.name + + # If the CHAP secret was provided, we trigger an update. 
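+ # The API does not report the current secret, so there is no way to compare it with the
+ # requested value; a provided chap_secret therefore always results in changed=True.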
+ if self.chap_secret:
+ update = True
+ body.update(dict(enableChapAuthentication=True,
+ chapSecret=self.chap_secret))
+ # If no secret was provided, then we disable chap
+ elif target["chap"]:
+ update = True
+ body.update(dict(enableChapAuthentication=False))
+
+ if update and not self.check_mode:
+ try:
+ self.request("storage-systems/%s/iscsi/target-settings" % self.ssid, method="POST", data=body)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+
+ return update
+
+ def apply_target_changes(self):
+ update = False
+ target = self.target
+
+ body = dict()
+
+ if self.ping != target["ping"]:
+ update = True
+ body["icmpPingResponseEnabled"] = self.ping
+
+ if self.unnamed_discovery != target["unnamed_discovery"]:
+ update = True
+ body["unnamedDiscoverySessionsEnabled"] = self.unnamed_discovery
+
+ if update and not self.check_mode:
+ try:
+ self.request("storage-systems/%s/iscsi/entity" % self.ssid, method="POST", data=body)
+ except Exception as err:
+ self.module.fail_json(msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))
+ return update
+
+ def update(self):
+ update = self.apply_iscsi_settings()
+ update = self.apply_target_changes() or update
+
+ target = self.target
+ data = dict((key, target[key]) for key in target if key in ["iqn", "alias"])
+
+ self.module.exit_json(msg="The iSCSI target settings have been updated.", changed=update, **data)
+
+
+def main():
+ iface = NetAppESeriesIscsiTarget()
+ iface.update()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ldap.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ldap.py
new file mode 100644
index 000000000..18f2b622f
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_ldap.py
@@ -0,0 +1,391 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_ldap
+short_description: NetApp E-Series manage LDAP integration to use for authentication
+description:
+ - Configure an E-Series system to allow authentication via an LDAP server
+author:
+ - Michael Price (@lmprice)
+ - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+ - netapp_eseries.santricity.santricity.santricity_doc
+options:
+ state:
+ description:
+ - When I(state=present) the defined LDAP domain will be added to the storage system.
+ - When I(state=absent) the specified domain will be removed from the storage system.
+ - When I(state=disabled) all existing LDAP domains will be deleted from the storage system.
+ type: str
+ choices:
+ - present
+ - absent
+ - disabled
+ default: present
+ identifier:
+ description:
+ - This is a unique identifier for the configuration (for cases where there are multiple domains configured).
+ type: str
+ default: "default"
+ required: false
+ bind_user:
+ description:
+ - This is the user account that will be used for querying the LDAP server.
+ - Required when I(bind_password) is specified.
+ - "Example: CN=MyBindAcct,OU=ServiceAccounts,DC=example,DC=com"
+ type: str
+ required: false
+ bind_password:
+ description:
+ - This is the password for the bind user account.
+ - Required when I(bind_user) is specified.
+ type: str
+ required: false
+ server_url:
+ description:
+ - This is the LDAP server URL.
+ - The connection string should be specified as using the ldap or ldaps protocol along with the port information.
+ type: str
+ required: false
+ names:
+ description:
+ - The domain name[s] that will be utilized when authenticating to identify which domain to utilize.
+ - Defaults to the DNS name of I(server_url).
+ - The only requirement is that the name[s] be resolvable.
+ - "Example: user@example.com"
+ type: list
+ required: false
+ search_base:
+ description:
+ - The search base is used to find group memberships of the user.
+ - "Example: ou=users,dc=example,dc=com"
+ type: str
+ required: false
+ role_mappings:
+ description:
+ - This is where you specify which groups should have access to what permissions for the
+ storage-system.
+ - For example, all users in group A will be assigned all 4 available roles, which will allow access
+ to all the management functionality of the system (super-user). Those in group B only have the
+ storage.monitor role, which will allow only read-only access.
+ - This is specified as a mapping of regular expressions to a list of roles. See the examples.
+ - The roles that will be assigned to the group/groups matching the provided regex.
+ - storage.admin allows users full read/write access to storage objects and operations.
+ - storage.monitor allows users read-only access to storage objects and operations.
+ - support.admin allows users access to hardware, diagnostic information, the Major Event
+ Log, and other critical support-related functionality, but not the storage configuration.
+ - security.admin allows users access to authentication/authorization configuration, as well
+ as the audit log configuration, and certificate management.
+ type: dict
+ required: false
+ group_attributes:
+ description:
+ - The user attributes that should be considered for the group to role mapping.
+ - Typically this is used with something like "memberOf", and a user's access is tested against group
+ membership or lack thereof.
+ type: list
+ default: ["memberOf"]
+ required: false
+ user_attribute:
+ description:
+ - This is the attribute we will use to match the provided username when a user attempts to
+ authenticate.
+ type: str
+ default: "sAMAccountName"
+ required: false
+notes:
+ - Check mode is supported.
+ - This module allows you to define one or more LDAP domains identified uniquely by I(identifier) to use for
+ authentication. Authorization is determined by I(role_mappings), in that different groups of users may be given
+ different (or no) access to certain aspects of the system and API.
+ - The local user accounts will still be available if the LDAP server becomes unavailable/inaccessible.
+ - Generally, you'll need to get the details of your organization's LDAP server before you'll be able to configure
+ the system for using LDAP authentication; every implementation is likely to be very different.
+ - This API is currently only supported with the Embedded Web Services API v2.0 and higher, or the Web Services Proxy
+ v3.0 and higher.
+""" + +EXAMPLES = """ + - name: Disable LDAP authentication + na_santricity_ldap: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + state: absent + + - name: Remove the "default" LDAP domain configuration + na_santricity_ldap: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + state: absent + identifier: default + + - name: Define a new LDAP domain, utilizing defaults where possible + na_santricity_ldap: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + state: enabled + bind_username: "CN=MyBindAccount,OU=ServiceAccounts,DC=example,DC=com" + bind_password: "mySecretPass" + server: "ldap://example.com:389" + search_base: "OU=Users,DC=example,DC=com" + role_mappings: + ".*dist-dev-storage.*": + - storage.admin + - security.admin + - support.admin + - storage.monitor +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The ldap settings have been updated. +""" +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule +from ansible.module_utils._text import to_native + +try: + import urlparse +except ImportError: + import urllib.parse as urlparse + + +class NetAppESeriesLdap(NetAppESeriesModule): + NO_CHANGE_MSG = "No changes were necessary." + TEMPORARY_DOMAIN = "ANSIBLE_TMP_DOMAIN" + + def __init__(self): + ansible_options = dict(state=dict(type="str", required=False, default="present", choices=["present", "absent", "disabled"]), + identifier=dict(type="str", required=False, default="default"), + bind_user=dict(type="str", required=False), + bind_password=dict(type="str", required=False, no_log=True), + names=dict(type="list", required=False), + server_url=dict(type="str", required=False), + search_base=dict(type="str", required=False), + role_mappings=dict(type="dict", required=False, no_log=True), + group_attributes=dict(type="list", default=["memberOf"], required=False), + user_attribute=dict(type="str", required=False, default="sAMAccountName")) + + required_if = [["state", "present", ["server_url"]]] + required_together = [["bind_user", "bind_password"]] + super(NetAppESeriesLdap, self).__init__(ansible_options=ansible_options, + web_services_version="02.00.0000.0000", + required_if=required_if, + required_together=required_together, + supports_check_mode=True) + + args = self.module.params + self.state = args["state"] + self.id = args["identifier"] + self.bind_user = args["bind_user"] + self.bind_password = args["bind_password"] + self.names = args["names"] + self.server = args["server_url"] + self.search_base = args["search_base"] + self.role_mappings = args["role_mappings"] + self.group_attributes = args["group_attributes"] + self.user_attribute = args["user_attribute"] + + if self.server and not self.names: + parts = urlparse.urlparse(self.server) + self.names = [parts.netloc.split(':')[0]] + + # Check whether request needs to be forwarded on to the controller web services rest api. 
+ self.url_path_prefix = "" + if self.is_embedded(): + self.url_path_prefix = "storage-systems/1/" + elif self.ssid != "0" and self.ssid.lower() != "proxy": + self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/storage-systems/1/" % self.ssid + + self.existing_domain_ids = [] + self.domain = {} # Existing LDAP domain + self.body = {} # Request body + + def get_domains(self): + """Retrieve all domain information from storage system.""" + domains = None + try: + rc, response = self.request(self.url_path_prefix + "ldap") + domains = response["ldapDomains"] + except Exception as error: + self.module.fail_json(msg="Failed to retrieve current LDAP configuration. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error))) + + return domains + + def build_request_body(self): + """Build the request body.""" + self.body.update({"id": self.id, "groupAttributes": self.group_attributes, "ldapUrl": self.server, "names": self.names, "roleMapCollection": []}) + + if self.search_base: + self.body.update({"searchBase": self.search_base}) + if self.user_attribute: + self.body.update({"userAttribute": self.user_attribute}) + if self.bind_user and self.bind_password: + self.body.update({"bindLookupUser": {"password": self.bind_password, "user": self.bind_user}}) + if self.role_mappings: + for regex, names in self.role_mappings.items(): + for name in names: + self.body["roleMapCollection"].append({"groupRegex": regex, "ignorecase": True, "name": name}) + + def are_changes_required(self): + """Determine whether any changes are required and build request body.""" + change_required = False + domains = self.get_domains() + + if self.state == "disabled" and domains: + self.existing_domain_ids = [domain["id"] for domain in domains] + change_required = True + + elif self.state == "present": + for domain in domains: + if self.id == domain["id"]: + self.domain = domain + + if self.state == "absent": + change_required = True + elif (len(self.group_attributes) != len(domain["groupAttributes"]) or + any([a not in domain["groupAttributes"] for a in self.group_attributes])): + change_required = True + elif self.user_attribute != domain["userAttribute"]: + change_required = True + elif self.search_base.lower() != domain["searchBase"].lower(): + change_required = True + elif self.server != domain["ldapUrl"]: + change_required = True + elif any(name not in domain["names"] for name in self.names) or any(name not in self.names for name in domain["names"]): + change_required = True + elif self.role_mappings: + if len(self.body["roleMapCollection"]) != len(domain["roleMapCollection"]): + change_required = True + else: + for role_map in self.body["roleMapCollection"]: + for existing_role_map in domain["roleMapCollection"]: + if role_map["groupRegex"] == existing_role_map["groupRegex"] and role_map["name"] == existing_role_map["name"]: + break + else: + change_required = True + + if not change_required and self.bind_user and self.bind_password: + if self.bind_user != domain["bindLookupUser"]["user"]: + change_required = True + elif self.bind_password: + temporary_domain = None + try: + # Check whether temporary domain exists + if any(domain["id"] == self.TEMPORARY_DOMAIN for domain in domains): + self.delete_domain(self.TEMPORARY_DOMAIN) + + temporary_domain = self.add_domain(temporary=True, skip_test=True) + rc, tests = self.request(self.url_path_prefix + "ldap/test", method="POST") + + temporary_domain_test = {} + domain_test = {} + for test in tests: + if test["id"] == temporary_domain["id"]: + temporary_domain_test = 
test["result"] + if self.id == test["id"]: + domain_test = test["result"] + + if temporary_domain_test["authenticationTestResult"] == "ok" and domain_test["authenticationTestResult"] != "ok": + change_required = True + elif temporary_domain_test["authenticationTestResult"] != "ok": + self.module.fail_json(msg="Failed to authenticate bind credentials! Array Id [%s]." % self.ssid) + + finally: + if temporary_domain: + self.delete_domain(self.TEMPORARY_DOMAIN) + break + else: + change_required = True + elif self.state == "absent": + for domain in domains: + if self.id == domain["id"]: + change_required = True + + return change_required + + def add_domain(self, temporary=False, skip_test=False): + """Add domain to storage system.""" + domain = None + body = self.body.copy() + if temporary: + body.update({"id": self.TEMPORARY_DOMAIN, "names": [self.TEMPORARY_DOMAIN]}) + + try: + rc, response = self.request(self.url_path_prefix + "ldap/addDomain?skipTest=%s" % ("true" if not skip_test else "false"), + method="POST", data=body) + domain = response["ldapDomains"][0] + except Exception as error: + self.module.fail_json(msg="Failed to create LDAP domain. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error))) + + return domain + + def update_domain(self): + """Update existing domain on storage system.""" + try: + rc, response = self.request(self.url_path_prefix + "ldap/%s" % self.domain["id"], method="POST", data=self.body) + except Exception as error: + self.module.fail_json(msg="Failed to update LDAP domain. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error))) + + def delete_domain(self, domain_id): + """Delete specific domain on the storage system.""" + try: + url = self.url_path_prefix + "ldap/%s" % domain_id + rc, response = self.request(self.url_path_prefix + "ldap/%s" % domain_id, method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to delete LDAP domain. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error))) + + def disable_domains(self): + """Delete all existing domains on storage system.""" + for domain_id in self.existing_domain_ids: + self.delete_domain(domain_id) + + def apply(self): + """Apply any necessary changes to the LDAP configuration.""" + self.build_request_body() + change_required = self.are_changes_required() + + if change_required and not self.module.check_mode: + if self.state == "present": + if self.domain: + self.update_domain() + self.module.exit_json(msg="LDAP domain has been updated. Array Id: [%s]" % self.ssid, changed=change_required) + else: + self.add_domain() + self.module.exit_json(msg="LDAP domain has been added. Array Id: [%s]" % self.ssid, changed=change_required) + elif self.state == "absent": + if self.domain: + self.delete_domain(self.domain["id"]) + self.module.exit_json(msg="LDAP domain has been removed. Array Id: [%s]" % self.ssid, changed=change_required) + else: + self.disable_domains() + self.module.exit_json(msg="All LDAP domains have been removed. Array Id: [%s]" % self.ssid, changed=change_required) + + self.module.exit_json(msg="No changes have been made to the LDAP configuration. 
Array Id: [%s]" % self.ssid, changed=change_required) + + +def main(): + ldap = NetAppESeriesLdap() + ldap.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_lun_mapping.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_lun_mapping.py new file mode 100644 index 000000000..d3d70fb5d --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_lun_mapping.py @@ -0,0 +1,247 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: na_santricity_lun_mapping +author: + - Kevin Hulquest (@hulquest) + - Nathan Swartz (@ndswartz) +short_description: NetApp E-Series manage lun mappings +description: + - Create, delete, or modify mappings between a volume and a targeted host/host+ group. +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.santricity_doc +options: + state: + description: + - Present will ensure the mapping exists, absent will remove the mapping. + type: str + required: False + choices: ["present", "absent"] + default: "present" + target: + description: + - The name of host or hostgroup you wish to assign to the mapping + - If omitted, the default hostgroup is used. + - If the supplied I(volume_name) is associated with a different target, it will be updated to what is supplied here. + type: str + required: False + volume_name: + description: + - The name of the volume you wish to include in the mapping. + - Use ACCESS_VOLUME to reference the in-band access management volume. + type: str + required: True + aliases: + - volume + lun: + description: + - The LUN value you wish to give the mapping. + - If the supplied I(volume_name) is associated with a different LUN, it will be updated to what is supplied here. + - LUN value will be determine by the storage-system when not specified. 
+ type: int + required: false +''' + +EXAMPLES = ''' +--- + - name: Map volume1 to the host target host1 + na_santricity_lun_mapping: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + state: present + target: host1 + volume: volume1 + - name: Delete the lun mapping between volume1 and host1 + na_santricity_lun_mapping: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + state: absent + target: host1 + volume: volume1 +''' +RETURN = ''' +msg: + description: success of the module + returned: always + type: str + sample: Lun mapping is complete +''' +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule +from ansible.module_utils._text import to_native + + +class NetAppESeriesLunMapping(NetAppESeriesModule): + def __init__(self): + ansible_options = dict(state=dict(required=False, choices=["present", "absent"], default="present"), + target=dict(required=False, default=None), + volume_name=dict(required=True, aliases=["volume"]), + lun=dict(type="int", required=False)) + + super(NetAppESeriesLunMapping, self).__init__(ansible_options=ansible_options, + web_services_version="02.00.0000.0000", + supports_check_mode=True) + + args = self.module.params + self.state = args["state"] == "present" + self.target = args["target"] if args["target"] else "DEFAULT_HOSTGROUP" + self.volume = args["volume_name"] if args["volume_name"] != "ACCESS_VOLUME" else "Access" + self.lun = args["lun"] + self.check_mode = self.module.check_mode + self.mapping_info = None + + if not self.url.endswith('/'): + self.url += '/' + + def update_mapping_info(self): + """Collect the current state of the storage array.""" + response = None + try: + rc, response = self.request("storage-systems/%s/graph" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve storage array graph. Id [%s]. Error [%s]" % (self.ssid, to_native(error))) + + # Create dictionary containing host/cluster references mapped to their names + target_reference = {} + target_name = {} + target_type = {} + + for host in response["storagePoolBundle"]["host"]: + target_reference.update({host["hostRef"]: host["name"]}) + target_name.update({host["name"]: host["hostRef"]}) + target_type.update({host["name"]: "host"}) + + for cluster in response["storagePoolBundle"]["cluster"]: + + # Verify there is no ambiguity between target's type (ie host and group have the same name) + if cluster["name"] == self.target and self.target in target_name.keys(): + self.module.fail_json(msg="Ambiguous target type: target name is used for both host and group targets! 
Id [%s]" % self.ssid) + + target_reference.update({cluster["clusterRef"]: cluster["name"]}) + target_name.update({cluster["name"]: cluster["clusterRef"]}) + target_type.update({cluster["name"]: "group"}) + + target_reference.update({"0000000000000000000000000000000000000000": "DEFAULT_HOSTGROUP"}) + target_name.update({"DEFAULT_HOSTGROUP": "0000000000000000000000000000000000000000"}) + target_type.update({"DEFAULT_HOSTGROUP": "group"}) + + volume_reference = {} + volume_name = {} + lun_name = {} + for volume in response["volume"]: + volume_reference.update({volume["volumeRef"]: volume["name"]}) + volume_name.update({volume["name"]: volume["volumeRef"]}) + if volume["listOfMappings"]: + lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]}) + for volume in response["highLevelVolBundle"]["thinVolume"]: + volume_reference.update({volume["volumeRef"]: volume["name"]}) + volume_name.update({volume["name"]: volume["volumeRef"]}) + if volume["listOfMappings"]: + lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]}) + + volume_name.update({response["sa"]["accessVolume"]["name"]: response["sa"]["accessVolume"]["accessVolumeRef"]}) + volume_reference.update({response["sa"]["accessVolume"]["accessVolumeRef"]: response["sa"]["accessVolume"]["name"]}) + + # Build current mapping object + self.mapping_info = dict(lun_mapping=[dict(volume_reference=mapping["volumeRef"], + map_reference=mapping["mapRef"], + lun_mapping_reference=mapping["lunMappingRef"], + lun=mapping["lun"] + ) for mapping in response["storagePoolBundle"]["lunMapping"]], + volume_by_reference=volume_reference, + volume_by_name=volume_name, + lun_by_name=lun_name, + target_by_reference=target_reference, + target_by_name=target_name, + target_type_by_name=target_type) + + def get_lun_mapping(self): + """Find the matching lun mapping reference. + + Returns: tuple(bool, int, int): contains volume match, volume mapping reference and mapping lun + """ + target_match = False + reference = None + lun = None + + self.update_mapping_info() + + # Verify that when a lun is specified that it does not match an existing lun value unless it is associated with + # the specified volume (ie for an update) + if self.lun and any((self.lun == lun_mapping["lun"] and + self.target == self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] and + self.volume != self.mapping_info["volume_by_reference"][lun_mapping["volume_reference"]] + ) for lun_mapping in self.mapping_info["lun_mapping"]): + self.module.fail_json(msg="Option lun value is already in use for target! Array Id [%s]." % self.ssid) + + # Verify volume and target exist if needed for expected state. + if self.state: + if self.volume not in self.mapping_info["volume_by_name"].keys(): + self.module.fail_json(msg="Volume does not exist. Id [%s]." % self.ssid) + if self.target and self.target not in self.mapping_info["target_by_name"].keys(): + self.module.fail_json(msg="Target does not exist. Id [%s'." 
% self.ssid) + + for lun_mapping in self.mapping_info["lun_mapping"]: + + # Find matching volume reference + if lun_mapping["volume_reference"] == self.mapping_info["volume_by_name"][self.volume]: + reference = lun_mapping["lun_mapping_reference"] + lun = lun_mapping["lun"] + + # Determine if lun mapping is attached to target with the + if (lun_mapping["map_reference"] in self.mapping_info["target_by_reference"].keys() and + self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] == self.target and + (self.lun is None or lun == self.lun)): + target_match = True + + return target_match, reference, lun + + def update(self): + """Execute the changes the require changes on the storage array.""" + target_match, lun_reference, lun = self.get_lun_mapping() + update = (self.state and not target_match) or (not self.state and lun_reference) + + if update and not self.check_mode: + try: + if self.state: + body = dict() + target = None if not self.target else self.mapping_info["target_by_name"][self.target] + if target: + body.update(dict(targetId=target)) + if self.lun is not None: + body.update(dict(lun=self.lun)) + + if lun_reference: + + rc, response = self.request("storage-systems/%s/volume-mappings/%s/move" % (self.ssid, lun_reference), method="POST", data=body) + else: + body.update(dict(mappableObjectId=self.mapping_info["volume_by_name"][self.volume])) + rc, response = self.request("storage-systems/%s/volume-mappings" % self.ssid, method="POST", data=body) + + else: # Remove existing lun mapping for volume and target + rc, response = self.request("storage-systems/%s/volume-mappings/%s" % (self.ssid, lun_reference), method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to update storage array lun mapping. Id [%s]. Error [%s]" % (self.ssid, to_native(error))) + + self.module.exit_json(msg="Lun mapping is complete.", changed=update) + + +def main(): + mapping = NetAppESeriesLunMapping() + mapping.update() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_mgmt_interface.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_mgmt_interface.py new file mode 100644 index 000000000..f4bef849c --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_mgmt_interface.py @@ -0,0 +1,656 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +--- +module: na_santricity_mgmt_interface +short_description: NetApp E-Series manage management interface configuration +description: + - Configure the E-Series management interfaces +author: + - Michael Price (@lmprice) + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.santricity_doc +options: + state: + description: + - Enable or disable IPv4 network interface configuration. + - Either IPv4 or IPv6 must be enabled otherwise error will occur. + - Assumed to be I(state==enabled) when I(config_method) is specified unless defined. + choices: + - enabled + - disabled + type: str + required: false + controller: + description: + - The controller that owns the port you want to configure. + - Controller names are represented alphabetically, with the first controller as A, + the second as B, and so on. 
+ - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard + limitation and could change in the future. + choices: + - A + - B + type: str + required: true + port: + description: + - The ethernet port configuration to modify. + - The channel represents the port number left to right on the controller, beginning with 1. + - Required when I(config_method) is specified. + type: int + required: false + address: + description: + - The IPv4 address to assign to the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + type: str + required: false + subnet_mask: + description: + - The subnet mask to utilize for the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + type: str + required: false + gateway: + description: + - The IPv4 gateway address to utilize for the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + type: str + required: false + config_method: + description: + - The configuration method type to use for network interface ports. + - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway). + choices: + - dhcp + - static + type: str + required: false + dns_config_method: + description: + - The configuration method type to use for DNS services. + - dhcp is mutually exclusive with I(dns_address), and I(dns_address_backup). + choices: + - dhcp + - static + type: str + required: false + dns_address: + description: + - Primary IPv4 or IPv6 DNS server address + type: str + required: false + dns_address_backup: + description: + - Secondary IPv4 or IPv6 DNS server address + type: str + required: false + ntp_config_method: + description: + - The configuration method type to use for NTP services. + - disable is mutually exclusive with I(ntp_address) and I(ntp_address_backup). + - dhcp is mutually exclusive with I(ntp_address) and I(ntp_address_backup). + choices: + - disabled + - dhcp + - static + type: str + required: false + ntp_address: + description: + - Primary IPv4, IPv6, or FQDN NTP server address + type: str + required: false + ntp_address_backup: + description: + - Secondary IPv4, IPv6, or FQDN NTP server address + type: str + required: false + ssh: + description: + - Enable ssh access to the controller for debug purposes. + - This is a controller-level setting. + - rlogin/telnet will be enabled for ancient equipment where ssh is not available. + type: bool + required: false +notes: + - Check mode is supported. + - It is highly recommended to have a minimum of one up management port on each controller. + - When using SANtricity Web Services Proxy, use M(na_santricity_storage_system) to update management paths. This is required because of a known issue + and will be addressed in the proxy version 4.1. After the resolution the management ports should automatically be updated. + - The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address + via dhcp, etc), can take seconds or minutes longer to take effect. 
+""" + +EXAMPLES = """ + - name: Configure the first port on the A controller with a static IPv4 address + na_santricity_mgmt_interface: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + port: "1" + controller: "A" + config_method: static + address: "192.168.1.100" + subnet_mask: "255.255.255.0" + gateway: "192.168.1.1" + + - name: Disable ipv4 connectivity for the second port on the B controller + na_santricity_mgmt_interface: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + port: "2" + controller: "B" + enable_interface: no + + - name: Enable ssh access for ports one and two on controller A + na_santricity_mgmt_interface: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + port: "1" + controller: "A" + ssh: yes + + - name: Configure static DNS settings for the first port on controller A + na_santricity_mgmt_interface: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + port: "1" + controller: "A" + dns_config_method: static + dns_address: "192.168.1.100" + dns_address_backup: "192.168.1.1" + +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The interface settings have been updated. +available_embedded_api_urls: + description: List containing available web services embedded REST API urls + returned: on success + type: list + sample: +""" +from time import sleep + +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule +from ansible.module_utils._text import to_native +from ansible.module_utils import six + +try: + import urlparse +except ImportError: + import urllib.parse as urlparse + +try: + import ipaddress +except ImportError: + HAS_IPADDRESS = False +else: + HAS_IPADDRESS = True + + +def is_ipv4(address): + """Determine whether address is IPv4.""" + try: + if six.PY2: + address = six.u(address) + ipaddress.IPv4Address(address) + return True + except Exception as error: + return False + + +def is_ipv6(address): + """Determine whether address is IPv6.""" + try: + if six.PY2: + address = six.u(address) + ipaddress.IPv6Address(address) + return True + except Exception as error: + return False + + +class NetAppESeriesMgmtInterface(NetAppESeriesModule): + MAXIMUM_VERIFICATION_TIMEOUT = 120 + + def __init__(self): + ansible_options = dict(state=dict(type="str", choices=["enabled", "disabled"], required=False), + controller=dict(type="str", required=True, choices=["A", "B"]), + port=dict(type="int"), + address=dict(type="str", required=False), + subnet_mask=dict(type="str", required=False), + gateway=dict(type="str", required=False), + config_method=dict(type="str", required=False, choices=["dhcp", "static"]), + dns_config_method=dict(type="str", required=False, choices=["dhcp", "static"]), + dns_address=dict(type="str", required=False), + dns_address_backup=dict(type="str", required=False), + ntp_config_method=dict(type="str", required=False, choices=["disabled", "dhcp", "static"]), + ntp_address=dict(type="str", required=False), + ntp_address_backup=dict(type="str", required=False), + ssh=dict(type="bool", required=False)) + + required_if = [["config_method", "static", ["port", "address", "subnet_mask"]], + ["dns_config_method", 
"static", ["dns_address"]], + ["ntp_config_method", "static", ["ntp_address"]]] + + super(NetAppESeriesMgmtInterface, self).__init__(ansible_options=ansible_options, + web_services_version="02.00.0000.0000", + required_if=required_if, + supports_check_mode=True) + + args = self.module.params + if args["state"] is None: + if args["config_method"] is not None: + self.enable_interface = True + else: + self.enable_interface = None + else: + self.enable_interface = args["state"] == "enabled" + + self.controller = args["controller"] + self.channel = args["port"] + + self.config_method = args["config_method"] + self.address = args["address"] + self.subnet_mask = args["subnet_mask"] + self.gateway = args["gateway"] + + self.dns_config_method = args["dns_config_method"] + self.dns_address = args["dns_address"] + self.dns_address_backup = args["dns_address_backup"] + + self.ntp_config_method = args["ntp_config_method"] + self.ntp_address = args["ntp_address"] + self.ntp_address_backup = args["ntp_address_backup"] + + self.ssh = args["ssh"] + + self.body = {} + self.interface_info = {} + self.alt_interface_addresses = [] + self.all_interface_addresses = [] + self.use_alternate_address = False + self.alt_url_path = None + + self.available_embedded_api_urls = [] + + def get_controllers(self): + """Retrieve a mapping of controller labels to their references + :return: controllers defined on the system. Example: {'A': '070000000000000000000001', 'B': '070000000000000000000002'} + """ + try: + rc, controllers = self.request("storage-systems/%s/controllers" % self.ssid) + except Exception as err: + controllers = list() + self.module.fail_json(msg="Failed to retrieve the controller settings. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + controllers.sort(key=lambda c: c['physicalLocation']['slot']) + controllers_dict = dict() + i = ord('A') + for controller in controllers: + label = chr(i) + settings = dict(controllerSlot=controller['physicalLocation']['slot'], + controllerRef=controller['controllerRef'], + ssh=controller['networkSettings']['remoteAccessEnabled']) + controllers_dict[label] = settings + i += 1 + return controllers_dict + + def update_target_interface_info(self, retries=60): + """Discover and update cached interface info.""" + net_interfaces = list() + try: + rc, net_interfaces = self.request("storage-systems/%s/configuration/ethernet-interfaces" % self.ssid) + except Exception as error: + if retries > 0: + self.update_target_interface_info(retries=retries - 1) + return + else: + self.module.fail_json(msg="Failed to retrieve defined management interfaces. Array Id [%s]. Error [%s]." 
% (self.ssid, to_native(error))) + + iface = None + channels = {} + controller_info = self.get_controllers()[self.controller] + controller_ref = controller_info["controllerRef"] + controller_ssh = controller_info["ssh"] + controller_dns = None + controller_ntp = None + dummy_interface_id = None # Needed for when a specific interface is not required (ie dns/ntp/ssh changes only) + for net in net_interfaces: + if net["controllerRef"] == controller_ref: + channels.update({net["channel"]: net["linkStatus"]}) + if dummy_interface_id is None: + dummy_interface_id = net["interfaceRef"] + if controller_dns is None: + controller_dns = net["dnsProperties"] + if controller_ntp is None: + controller_ntp = net["ntpProperties"] + + if net["ipv4Enabled"] and net["linkStatus"] == "up": + self.all_interface_addresses.append(net["ipv4Address"]) + if net["controllerRef"] == controller_ref and net["channel"] == self.channel: + iface = net + elif net["ipv4Enabled"] and net["linkStatus"] == "up": + self.alt_interface_addresses.append(net["ipv4Address"]) + + # Add controller specific information (ssh, dns and ntp) + self.interface_info.update({ + "id": dummy_interface_id, + "controllerRef": controller_ref, + "ssh": controller_ssh, + "dns_config_method": controller_dns["acquisitionProperties"]["dnsAcquisitionType"], + "dns_servers": controller_dns["acquisitionProperties"]["dnsServers"], + "ntp_config_method": controller_ntp["acquisitionProperties"]["ntpAcquisitionType"], + "ntp_servers": controller_ntp["acquisitionProperties"]["ntpServers"],}) + + # Add interface specific information when configuring IP address. + if self.config_method is not None: + if iface is None: + available_controllers = ["%s (%s)" % (channel, status) for channel, status in channels.items()] + self.module.fail_json(msg="Invalid port number! Controller %s ports: [%s]. Array [%s]" + % (self.controller, ",".join(available_controllers), self.ssid)) + else: + self.interface_info.update({ + "id": iface["interfaceRef"], + "controllerSlot": iface["controllerSlot"], + "channel": iface["channel"], + "link_status": iface["linkStatus"], + "enabled": iface["ipv4Enabled"], + "config_method": iface["ipv4AddressConfigMethod"], + "address": iface["ipv4Address"], + "subnet_mask": iface["ipv4SubnetMask"], + "gateway": iface["ipv4GatewayAddress"], + "ipv6_enabled": iface["ipv6Enabled"],}) + + def update_body_enable_interface_setting(self): + """Enable or disable the IPv4 network interface.""" + change_required = False + if not self.enable_interface and not self.interface_info["ipv6_enabled"]: + self.module.fail_json(msg="Either IPv4 or IPv6 must be enabled. Array [%s]." 
% self.ssid) + + if self.enable_interface != self.interface_info["enabled"]: + change_required = True + self.body.update({"ipv4Enabled": self.enable_interface}) + return change_required + + def update_body_interface_settings(self): + """Update network interface settings.""" + change_required = False + if self.config_method == "dhcp": + if self.interface_info["config_method"] != "configDhcp": + if self.interface_info["address"] in self.url: + self.use_alternate_address = True + change_required = True + self.body.update({"ipv4AddressConfigMethod": "configDhcp"}) + else: + self.body.update({"ipv4AddressConfigMethod": "configStatic", "ipv4Address": self.address, "ipv4SubnetMask": self.subnet_mask}) + if self.interface_info["config_method"] != "configStatic": + change_required = True + if self.address and self.interface_info["address"] != self.address: + if self.interface_info["address"] in self.url: + self.use_alternate_address = True + change_required = True + if self.subnet_mask and self.interface_info["subnet_mask"] != self.subnet_mask: + change_required = True + if self.gateway and self.interface_info["gateway"] != self.gateway: + self.body.update({"ipv4GatewayAddress": self.gateway}) + change_required = True + + return change_required + + def update_body_dns_server_settings(self): + """Add DNS server information to the request body.""" + change_required = False + if self.dns_config_method == "dhcp": + if self.interface_info["dns_config_method"] != "dhcp": + change_required = True + self.body.update({"dnsAcquisitionDescriptor": {"dnsAcquisitionType": "dhcp"}}) + + elif self.dns_config_method == "static": + dns_servers = [] + if ((self.dns_address and self.dns_address_backup and (not self.interface_info["dns_servers"] or + len(self.interface_info["dns_servers"]) != 2)) or + (self.dns_address and not self.dns_address_backup and (not self.interface_info["dns_servers"] or + len(self.interface_info["dns_servers"]) != 1))): + change_required = True + + # Check primary DNS address + if self.dns_address: + if is_ipv4(self.dns_address): + dns_servers.append({"addressType": "ipv4", "ipv4Address": self.dns_address}) + if (not self.interface_info["dns_servers"] or len(self.interface_info["dns_servers"]) < 1 or + self.interface_info["dns_servers"][0]["addressType"] != "ipv4" or + self.interface_info["dns_servers"][0]["ipv4Address"] != self.dns_address): + change_required = True + elif is_ipv6(self.dns_address): + dns_servers.append({"addressType": "ipv6", "ipv6Address": self.dns_address}) + if (not self.interface_info["dns_servers"] or len(self.interface_info["dns_servers"]) < 1 or + self.interface_info["dns_servers"][0]["addressType"] != "ipv6" or + self.interface_info["dns_servers"][0]["ipv6Address"].replace(":", "").lower() != self.dns_address.replace(":", "").lower()): + change_required = True + else: + self.module.fail_json(msg="Invalid IP address! DNS address must be either IPv4 or IPv6. Address [%s]." + " Array [%s]." 
% (self.dns_address, self.ssid))
+
+        # Check secondary DNS address
+        if self.dns_address_backup:
+            if is_ipv4(self.dns_address_backup):
+                dns_servers.append({"addressType": "ipv4", "ipv4Address": self.dns_address_backup})
+                if (not self.interface_info["dns_servers"] or len(self.interface_info["dns_servers"]) < 2 or
+                        self.interface_info["dns_servers"][1]["addressType"] != "ipv4" or
+                        self.interface_info["dns_servers"][1]["ipv4Address"] != self.dns_address_backup):
+                    change_required = True
+            elif is_ipv6(self.dns_address_backup):
+                dns_servers.append({"addressType": "ipv6", "ipv6Address": self.dns_address_backup})
+                if (not self.interface_info["dns_servers"] or len(self.interface_info["dns_servers"]) < 2 or
+                        self.interface_info["dns_servers"][1]["addressType"] != "ipv6" or
+                        self.interface_info["dns_servers"][1]["ipv6Address"].replace(":", "").lower() != self.dns_address_backup.replace(":", "").lower()):
+                    change_required = True
+            else:
+                self.module.fail_json(msg="Invalid IP address! DNS address must be either IPv4 or IPv6. Address [%s]."
+                                          " Array [%s]." % (self.dns_address_backup, self.ssid))
+
+        self.body.update({"dnsAcquisitionDescriptor": {"dnsAcquisitionType": "stat", "dnsServers": dns_servers}})
+
+        return change_required
+
+    def update_body_ntp_server_settings(self):
+        """Add NTP server information to the request body."""
+        change_required = False
+        if self.ntp_config_method == "disabled":
+            if self.interface_info["ntp_config_method"] != "disabled":
+                change_required = True
+                self.body.update({"ntpAcquisitionDescriptor": {"ntpAcquisitionType": "disabled"}})
+
+        elif self.ntp_config_method == "dhcp":
+            if self.interface_info["ntp_config_method"] != "dhcp":
+                change_required = True
+                self.body.update({"ntpAcquisitionDescriptor": {"ntpAcquisitionType": "dhcp"}})
+
+        elif self.ntp_config_method == "static":
+            ntp_servers = []
+            if ((self.ntp_address and self.ntp_address_backup and (not self.interface_info["ntp_servers"] or
+                                                                   len(self.interface_info["ntp_servers"]) != 2)) or
+                    (self.ntp_address and not self.ntp_address_backup and (not self.interface_info["ntp_servers"] or
+                                                                           len(self.interface_info["ntp_servers"]) != 1))):
+                change_required = True
+
+            # Check primary NTP address
+            if self.ntp_address:
+                if is_ipv4(self.ntp_address):
+                    ntp_servers.append({"addrType": "ipvx", "ipvxAddress": {"addressType": "ipv4", "ipv4Address": self.ntp_address}})
+                    if (not self.interface_info["ntp_servers"] or len(self.interface_info["ntp_servers"]) < 1 or
+                            self.interface_info["ntp_servers"][0]["addrType"] != "ipvx" or
+                            self.interface_info["ntp_servers"][0]["ipvxAddress"]["addressType"] != "ipv4" or
+                            self.interface_info["ntp_servers"][0]["ipvxAddress"]["ipv4Address"] != self.ntp_address):
+                        change_required = True
+                elif is_ipv6(self.ntp_address):
+                    ntp_servers.append({"addrType": "ipvx", "ipvxAddress": {"addressType": "ipv6", "ipv6Address": self.ntp_address}})
+                    if (not self.interface_info["ntp_servers"] or len(self.interface_info["ntp_servers"]) < 1 or
+                            self.interface_info["ntp_servers"][0]["addrType"] != "ipvx" or
+                            self.interface_info["ntp_servers"][0]["ipvxAddress"]["addressType"] != "ipv6" or
+                            self.interface_info["ntp_servers"][0]["ipvxAddress"][
+                                "ipv6Address"].replace(":", "").lower() != self.ntp_address.replace(":", "").lower()):
+                        change_required = True
+                else:
+                    ntp_servers.append({"addrType": "domainName", "domainName": self.ntp_address})
+                    if (not self.interface_info["ntp_servers"] or len(self.interface_info["ntp_servers"]) < 1 or
+                            self.interface_info["ntp_servers"][0]["addrType"] != "domainName" or
+                            self.interface_info["ntp_servers"][0]["domainName"].lower() != self.ntp_address.lower()):
+                        change_required = True
+
+            # Check secondary NTP address
+            if self.ntp_address_backup:
+                if is_ipv4(self.ntp_address_backup):
+                    ntp_servers.append({"addrType": "ipvx", "ipvxAddress": {"addressType": "ipv4", "ipv4Address": self.ntp_address_backup}})
+                    if (not self.interface_info["ntp_servers"] or len(self.interface_info["ntp_servers"]) < 2 or
+                            self.interface_info["ntp_servers"][1]["addrType"] != "ipvx" or
+                            self.interface_info["ntp_servers"][1]["ipvxAddress"]["addressType"] != "ipv4" or
+                            self.interface_info["ntp_servers"][1]["ipvxAddress"]["ipv4Address"] != self.ntp_address_backup):
+                        change_required = True
+                elif is_ipv6(self.ntp_address_backup):
+                    ntp_servers.append({"addrType": "ipvx", "ipvxAddress": {"addressType": "ipv6", "ipv6Address": self.ntp_address_backup}})
+                    if (not self.interface_info["ntp_servers"] or len(self.interface_info["ntp_servers"]) < 2 or
+                            self.interface_info["ntp_servers"][1]["addrType"] != "ipvx" or
+                            self.interface_info["ntp_servers"][1]["ipvxAddress"]["addressType"] != "ipv6" or
+                            self.interface_info["ntp_servers"][1]["ipvxAddress"][
+                                "ipv6Address"].replace(":", "").lower() != self.ntp_address_backup.replace(":", "").lower()):
+                        change_required = True
+                else:
+                    ntp_servers.append({"addrType": "domainName", "domainName": self.ntp_address_backup})
+                    if (not self.interface_info["ntp_servers"] or len(self.interface_info["ntp_servers"]) < 2 or
+                            self.interface_info["ntp_servers"][1]["addrType"] != "domainName" or
+                            self.interface_info["ntp_servers"][1]["domainName"].lower() != self.ntp_address_backup.lower()):
+                        change_required = True
+
+            self.body.update({"ntpAcquisitionDescriptor": {"ntpAcquisitionType": "stat", "ntpServers": ntp_servers}})
+
+        return change_required
+
+    def update_body_ssh_setting(self):
+        """Configure network interface ports for remote ssh access."""
+        change_required = False
+        if self.interface_info["ssh"] != self.ssh:
+            change_required = True
+            self.body.update({"enableRemoteAccess": self.ssh})
+        return change_required
+
+    def update_request_body(self):
+        """Verify all required changes have been made."""
+        self.update_target_interface_info()
+        self.body = {"controllerRef": self.get_controllers()[self.controller]["controllerRef"], "interfaceRef": self.interface_info["id"]}
+
+        change_required = False
+        if self.enable_interface is not None:
+            change_required = self.update_body_enable_interface_setting()
+        if self.config_method is not None:
+            change_required = self.update_body_interface_settings() or change_required
+        if self.dns_config_method is not None:
+            change_required = self.update_body_dns_server_settings() or change_required
+        if self.ntp_config_method is not None:
+            change_required = self.update_body_ntp_server_settings() or change_required
+        if self.ssh is not None:
+            change_required = self.update_body_ssh_setting() or change_required
+
+        self.module.log("update_request_body change_required: %s" % change_required)
+        return change_required
+
+    def update_url(self, retries=60):
+        """Update the E-Series base class url when an alternate address is available."""
+        for address in self.alt_interface_addresses:
+            if address not in self.url and address != "0.0.0.0":
+                parsed_url = urlparse.urlparse(self.url)
+                location = parsed_url.netloc.split(":")
+                location[0] = address
+                self.url = "%s://%s/" % (parsed_url.scheme, ":".join(location))
+                self.available_embedded_api_urls = ["%s://%s/%s" % (parsed_url.scheme, ":".join(location), self.DEFAULT_REST_API_PATH)]
self.module.warn("Using alternate address [%s]" % self.available_embedded_api_urls[0]) + break + else: + if retries > 0: + sleep(1) + self.update_target_interface_info() + self.update_url(retries=retries - 1) + else: + self.module.warn("Unable to obtain an alternate url!") + + def update(self): + """Update controller with new interface, dns service, ntp service and/or remote ssh access information.""" + change_required = self.update_request_body() + + # Build list of available web services rest api urls + self.available_embedded_api_urls = [] + parsed_url = urlparse.urlparse(self.url) + location = parsed_url.netloc.split(":") + for address in self.all_interface_addresses: + location[0] = address + self.available_embedded_api_urls = ["%s://%s/%s" % (parsed_url.scheme, ":".join(location), self.DEFAULT_REST_API_PATH)] + + if change_required and not self.module.check_mode: + + # Update url if currently used interface will be modified + if self.is_embedded(): + if self.use_alternate_address: + self.update_url() + if self.address: + parsed_url = urlparse.urlparse(self.url) + location = parsed_url.netloc.split(":") + location[0] = self.address + self.available_embedded_api_urls.append("%s://%s/%s" % (parsed_url.scheme, ":".join(location), self.DEFAULT_REST_API_PATH)) + else: + self.available_embedded_api_urls = ["%s/%s" % (self.url, self.DEFAULT_REST_API_PATH)] + + # Update management interface + try: + rc, response = self.request("storage-systems/%s/configuration/ethernet-interfaces" % self.ssid, method="POST", data=self.body) + except Exception as error: + pass + + # Validate all changes have been made + for retries in range(self.MAXIMUM_VERIFICATION_TIMEOUT): + if not self.update_request_body(): + break + sleep(1) + else: + self.module.warn("Changes failed to complete! Timeout waiting for management interface to update. Array [%s]." % self.ssid) + self.module.exit_json(msg="The interface settings have been updated.", changed=change_required, + available_embedded_api_urls=self.available_embedded_api_urls) + self.module.exit_json(msg="No changes are required.", changed=change_required, + available_embedded_api_urls=self.available_embedded_api_urls if self.is_embedded() else []) + + +def main(): + interface = NetAppESeriesMgmtInterface() + interface.update() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_nvme_interface.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_nvme_interface.py new file mode 100644 index 000000000..d4d042d01 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_nvme_interface.py @@ -0,0 +1,305 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: na_santricity_nvme_interface +short_description: NetApp E-Series manage NVMe interface configuration +description: Configure settings of an E-Series NVMe interface +author: Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.santricity_doc +options: + address: + description: + - The IPv4 address to assign to the NVMe interface + type: str + required: false + subnet_mask: + description: + - The subnet mask to utilize for the interface. 
+            - Only applicable when configuring RoCE.
+            - Mutually exclusive with I(config_method=dhcp).
+        type: str
+        required: false
+    gateway:
+        description:
+            - The IPv4 gateway address to utilize for the interface.
+            - Only applicable when configuring RoCE.
+            - Mutually exclusive with I(config_method=dhcp).
+        type: str
+        required: false
+    config_method:
+        description:
+            - The configuration method type to use for this interface.
+            - Only applicable when configuring RoCE.
+            - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway).
+        type: str
+        choices:
+            - dhcp
+            - static
+        required: false
+        default: dhcp
+    mtu:
+        description:
+            - The maximum transmission unit (MTU), in bytes.
+            - Only applicable when configuring RoCE.
+            - This allows you to configure a larger value for the MTU, in order to enable jumbo frames
+              (any value > 1500).
+            - Generally, it is necessary to have your host, switches, and other components not only support jumbo
+              frames, but also have them configured properly. Therefore, unless you know what you're doing, it's best to
+              leave this at the default.
+        type: int
+        default: 1500
+        required: false
+        aliases:
+            - max_frame_size
+    speed:
+        description:
+            - This is the ethernet port speed measured in Gb/s.
+            - Value must be a supported speed or auto for automatically negotiating the speed with the port.
+            - Only applicable when configuring RoCE.
+            - The configured ethernet port speed should match the speed capability of the SFP on the selected port.
+        type: str
+        required: false
+        default: auto
+    state:
+        description:
+            - Whether or not the specified RoCE interface should be enabled.
+            - Only applicable when configuring RoCE.
+        choices:
+            - enabled
+            - disabled
+        type: str
+        required: false
+        default: enabled
+    channel:
+        description:
+            - This option specifies which NVMe controller channel to configure.
+            - The list of choices is not necessarily comprehensive. It depends on the number of ports
+              that are available in the system.
+            - The numerical value represents the number of the channel (typically from left to right on the HIC),
+              beginning with a value of 1.
+        type: int
+        required: true
+    controller:
+        description:
+            - The controller that owns the port you want to configure.
+            - Controller names are presented alphabetically, with the first controller as A and the second as B.
+        type: str
+        required: true
+        choices: [A, B]
+"""
+EXAMPLES = """
+# Illustrative example only; the ssid, URL, credentials, and addresses below are placeholder values.
+- name: Configure the first NVMe RoCE interface on controller A with a static address
+  na_santricity_nvme_interface:
+    ssid: "1"
+    api_url: "https://192.168.1.100:8443/devmgr/v2"
+    api_username: "admin"
+    api_password: "adminpass"
+    validate_certs: true
+    controller: A
+    channel: 1
+    state: enabled
+    config_method: static
+    address: 192.168.2.100
+    subnet_mask: 255.255.255.0
+    gateway: 192.168.2.1
+"""
+
+RETURN = """
+msg:
+    description: Success message
+    returned: on success
+    type: str
+    sample: The interface settings have been updated.
+"""
+import re
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+
+
+class NetAppESeriesNvmeInterface(NetAppESeriesModule):
+    def __init__(self):
+        ansible_options = dict(address=dict(type="str", required=False),
+                               subnet_mask=dict(type="str", required=False),
+                               gateway=dict(type="str", required=False),
+                               config_method=dict(type="str", required=False, default="dhcp", choices=["dhcp", "static"]),
+                               mtu=dict(type="int", default=1500, required=False, aliases=["max_frame_size"]),
+                               speed=dict(type="str", default="auto", required=False),
+                               state=dict(type="str", default="enabled", required=False, choices=["enabled", "disabled"]),
+                               channel=dict(type="int", required=True),
+                               controller=dict(type="str", required=True, choices=["A", "B"]))
+
+        required_if = [["config_method", "static", ["address", "subnet_mask"]]]
+        super(NetAppESeriesNvmeInterface, self).__init__(ansible_options=ansible_options,
+                                                         web_services_version="02.00.0000.0000",
+                                                         required_if=required_if,
+                                                         supports_check_mode=True)
+
+        args = self.module.params
+        self.address = args["address"]
+        self.subnet_mask = args["subnet_mask"]
+        self.gateway = args["gateway"]
+        self.config_method = "configDhcp" if args["config_method"] == "dhcp" else "configStatic"
+        self.mtu = args["mtu"]
+        self.speed = args["speed"]
+        self.enabled = args["state"] == "enabled"
+        self.channel = args["channel"]
+        self.controller = args["controller"]
+
+        address_regex = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
+        if self.address and not address_regex.match(self.address):
+            self.module.fail_json(msg="An invalid IP address was provided for address. Address [%s]." % self.address)
+        if self.subnet_mask and not address_regex.match(self.subnet_mask):
+            self.module.fail_json(msg="An invalid IP address was provided for subnet_mask. Subnet mask [%s]." % self.subnet_mask)
+        if self.gateway and not address_regex.match(self.gateway):
+            self.module.fail_json(msg="An invalid IP address was provided for gateway. Gateway [%s]." % self.gateway)
+
+        self.get_target_interface_cache = None
+
+    def get_nvmeof_interfaces(self):
+        """Retrieve all interfaces that are using NVMe over fabrics."""
+        ifaces = list()
+        try:
+            rc, ifaces = self.request("storage-systems/%s/interfaces?channelType=hostside" % self.ssid)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve defined host interfaces. Array Id [%s]. Error [%s]."
+                                      % (self.ssid, to_native(error)))
+
+        # Keep only the host-side interfaces that use the NVMe-oF command protocol.
+ nvmeof_ifaces = [] + for iface in ifaces: + interface_type = iface["ioInterfaceTypeData"]["interfaceType"] + properties = iface["commandProtocolPropertiesList"]["commandProtocolProperties"] + + try: + link_status = iface["ioInterfaceTypeData"]["ib"]["linkState"] + except Exception as error: + link_status = iface["ioInterfaceTypeData"]["ethernet"]["interfaceData"]["ethernetData"]["linkStatus"] + + if (properties and properties[0]["commandProtocol"] == "nvme" and + properties[0]["nvmeProperties"]["commandSet"] == "nvmeof"): + nvmeof_ifaces.append({"properties": properties[0]["nvmeProperties"]["nvmeofProperties"], + "reference": iface["interfaceRef"], + "channel": iface["ioInterfaceTypeData"][iface["ioInterfaceTypeData"]["interfaceType"]]["channel"], + "interface_type": interface_type, + "interface": iface["ioInterfaceTypeData"][interface_type], + "controller_id": iface["controllerRef"], + "link_status": link_status}) + return nvmeof_ifaces + + def get_controllers(self): + """Retrieve a mapping of controller labels to their references""" + controllers = list() + try: + rc, controllers = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/id" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(error))) + + controllers.sort() + controllers_dict = {} + i = ord("A") + for controller in controllers: + label = chr(i) + controllers_dict[label] = controller + i += 1 + + return controllers_dict + + def get_target_interface(self): + """Retrieve the targeted controller interface""" + if self.get_target_interface_cache is None: + ifaces = self.get_nvmeof_interfaces() + controller_id = self.get_controllers()[self.controller] + + controller_ifaces = [] + for iface in ifaces: + if iface["controller_id"] == controller_id: + controller_ifaces.append(iface) + + sorted_controller_ifaces = sorted(controller_ifaces, key=lambda x: x["channel"]) + if self.channel < 1 or self.channel > len(controller_ifaces): + status_msg = ", ".join(["%s (link %s)" % (index + 1, iface["link_status"]) + for index, iface in enumerate(sorted_controller_ifaces)]) + self.module.fail_json(msg="Invalid controller %s NVMe channel. Available channels: %s, Array Id [%s]." + % (self.controller, status_msg, self.ssid)) + + self.get_target_interface_cache = sorted_controller_ifaces[self.channel - 1] + + return self.get_target_interface_cache + + def update(self): + """Update the storage system's controller nvme interface if needed.""" + update_required = False + body = {} + + iface = self.get_target_interface() + if iface["properties"]["provider"] == "providerInfiniband": + if (iface["properties"]["ibProperties"]["ipAddressData"]["addressType"] != "ipv4" or + iface["properties"]["ibProperties"]["ipAddressData"]["ipv4Data"]["ipv4Address"] != self.address): + update_required = True + body = {"settings": {"ibSettings": {"networkSettings": {"ipv4Address": self.address}}}} + + elif iface["properties"]["provider"] == "providerRocev2": + interface_data = iface["interface"]["interfaceData"]["ethernetData"] + current_speed = interface_data["currentInterfaceSpeed"].lower().replace("speed", "").replace("gig", "") + interface_supported_speeds = [str(speed).lower().replace("speed", "").replace("gig", "") + for speed in interface_data["supportedInterfaceSpeeds"]] + if self.speed not in interface_supported_speeds: + self.module.fail_json(msg="Unsupported interface speed! Options %s. Array [%s]." 
+ % (interface_supported_speeds, self.ssid)) + + roce_properties = iface["properties"]["roceV2Properties"] + if self.enabled != roce_properties["ipv4Enabled"]: + update_required = True + if self.address and roce_properties["ipv4Data"]["ipv4AddressConfigMethod"] != self.config_method: + update_required = True + if self.address and roce_properties["ipv4Data"]["ipv4AddressData"]["ipv4Address"] != self.address: + update_required = True + if self.subnet_mask and roce_properties["ipv4Data"]["ipv4AddressData"]["ipv4SubnetMask"] != self.subnet_mask: + update_required = True + if self.gateway and roce_properties["ipv4Data"]["ipv4AddressData"]["ipv4GatewayAddress"] != self.gateway: + update_required = True + if self.speed and self.speed != current_speed: + update_required = True + if (self.mtu and iface["interface"]["interfaceData"]["ethernetData"][ + "maximumFramePayloadSize"] != self.mtu): + update_required = True + + if update_required: + body = {"id": iface["reference"], "settings": {"roceV2Settings": { + "networkSettings": {"ipv4Enabled": self.enabled, + "ipv4Settings": {"configurationMethod": self.config_method}}}}} + + if self.config_method == "configStatic": + if self.address: + body["settings"]["roceV2Settings"]["networkSettings"]["ipv4Settings"].update( + {"address": self.address}) + if self.subnet_mask: + body["settings"]["roceV2Settings"]["networkSettings"]["ipv4Settings"].update( + {"subnetMask": self.subnet_mask}) + if self.gateway: + body["settings"]["roceV2Settings"]["networkSettings"]["ipv4Settings"].update( + {"gatewayAddress": self.gateway}) + if self.speed: + if self.speed == "auto": + body["settings"]["roceV2Settings"]["networkSettings"].update({"interfaceSpeed": "speedAuto"}) + else: + body["settings"]["roceV2Settings"]["networkSettings"].update( + {"interfaceSpeed": "speed%sgig" % self.speed}) + if self.mtu: + body["settings"]["roceV2Settings"]["networkSettings"].update({"interfaceMtu": self.mtu}) + + if update_required and not self.module.check_mode: + try: + rc, iface = self.request("storage-systems/%s/nvmeof/interfaces/%s" % (self.ssid, iface["reference"]), + method="POST", data=body) + except Exception as error: + self.module.fail_json(msg="Failed to configure interface. Array Id [%s]. Error [%s]." + % (self.ssid, to_native(error))) + + self.module.exit_json(msg="NVMeoF interface settings have been updated.", changed=update_required) + self.module.exit_json(msg="No changes have been made.", changed=update_required) + + +def main(): + nvmeof_interface = NetAppESeriesNvmeInterface() + nvmeof_interface.update() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_drive_firmware_upload.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_drive_firmware_upload.py new file mode 100644 index 000000000..715467e18 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_drive_firmware_upload.py @@ -0,0 +1,150 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: na_santricity_proxy_drive_firmware_upload +short_description: NetApp E-Series manage proxy drive firmware files +description: + - Ensure drive firmware files are available on SANtricity Web Service Proxy. 
+author:
+    - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.santricity_proxy_doc
+options:
+    firmware:
+        description:
+            - This option can be a list of file paths and/or directories containing drive firmware.
+            - Note that only files with the .dlp extension will be added to the proxy; all other files are ignored.
+            - NetApp E-Series drives require special firmware which can be downloaded from https://mysupport.netapp.com/NOW/download/tools/diskfw_eseries/
+        type: list
+        required: false
+"""
+EXAMPLES = """
+- name: Ensure correct firmware versions
+  na_santricity_proxy_drive_firmware_upload:
+    api_url: "https://192.168.1.100:8443/devmgr/v2"
+    api_username: "admin"
+    api_password: "adminpass"
+    validate_certs: true
+    firmware:
+      - "path/to/drive_firmware_file1.dlp"
+      - "path/to/drive_firmware_file2.dlp"
+      - "path/to/drive_firmware_directory"
+"""
+RETURN = """
+msg:
+    description: Whether any changes have been made to the collection of drive firmware on SANtricity Web Services Proxy.
+    type: str
+    returned: always
+"""
+import os
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata, request
+
+
+class NetAppESeriesProxyDriveFirmwareUpload(NetAppESeriesModule):
+    WAIT_TIMEOUT_SEC = 60 * 15
+
+    def __init__(self):
+        ansible_options = dict(firmware=dict(type="list", required=False))
+
+        super(NetAppESeriesProxyDriveFirmwareUpload, self).__init__(ansible_options=ansible_options,
+                                                                    web_services_version="02.00.0000.0000",
+                                                                    supports_check_mode=True,
+                                                                    proxy_specific_task=True)
+        args = self.module.params
+        self.firmware = args["firmware"]
+
+        self.files = None
+        self.add_files = []
+        self.remove_files = []
+        self.upload_failures = []
+
+    def determine_file_paths(self):
+        """Determine all the drive firmware file paths."""
+        self.files = {}
+        if self.firmware:
+            for path in self.firmware:
+
+                if not os.path.exists(path):
+                    self.module.fail_json(msg="Drive firmware file does not exist! File [%s]" % path)
+                elif os.path.isdir(path):
+                    if not path.endswith("/"):
+                        path = path + "/"
+                    for dir_filename in os.listdir(path):
+                        if dir_filename.lower().endswith(".dlp"):
+                            self.files.update({dir_filename: path + dir_filename})
+                elif path.lower().endswith(".dlp"):
+                    name = os.path.basename(path)
+                    self.files.update({name: path})
+
+    def determine_changes(self):
+        """Determine which drive firmware files need to be added to or removed from the proxy."""
+        try:
+            rc, results = self.request("files/drive")
+            current_files = [result["fileName"] for result in results]
+
+            for current_file in current_files:
+                if current_file not in self.files.keys():
+                    self.remove_files.append(current_file)
+
+            for expected_file in self.files.keys():
+                if expected_file not in current_files:
+                    self.add_files.append(expected_file)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve proxy drive firmware file list. Error [%s]" % error)
+
+    def upload_files(self):
+        """Add drive firmware files to the proxy."""
+        for filename in self.add_files:
+            firmware_name = os.path.basename(filename)
+            files = [("file", firmware_name, self.files[filename])]
+            headers, data = create_multipart_formdata(files)
+            try:
+                rc, response = self.request("files/drive", method="POST", headers=headers, data=data)
+            except Exception as error:
+                self.upload_failures.append(filename)
+                self.module.warn("Failed to upload drive firmware file. File [%s]." % firmware_name)
+
+    def delete_files(self):
+        """Remove drive firmware files from the proxy."""
+        for filename in self.remove_files:
+            try:
+                rc, response = self.request("files/drive/%s" % filename, method="DELETE")
+            except Exception as error:
+                self.upload_failures.append(filename)
+                self.module.warn("Failed to delete drive firmware file. File [%s]" % filename)
+
+    def apply(self):
+        """Apply state to the web services proxy."""
+        change_required = False
+        if not self.is_proxy():
+            self.module.fail_json(msg="Module can only be executed against SANtricity Web Services Proxy.")
+
+        self.determine_file_paths()
+        self.determine_changes()
+
+        if self.add_files or self.remove_files:
+            change_required = True
+
+        if change_required and not self.module.check_mode:
+            self.upload_files()
+            self.delete_files()
+
+        self.module.exit_json(changed=change_required, files_added=self.add_files, files_removed=self.remove_files)
+
+
+def main():
+    proxy_firmware_upload = NetAppESeriesProxyDriveFirmwareUpload()
+    proxy_firmware_upload.apply()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_firmware_upload.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_firmware_upload.py
new file mode 100644
index 000000000..100b1f051
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_firmware_upload.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_proxy_firmware_upload
+short_description: NetApp E-Series manage proxy firmware uploads.
+description:
+    - Ensure specific firmware versions are available on SANtricity Web Services Proxy.
+author:
+    - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.santricity_proxy_doc
+options:
+    firmware:
+        description:
+            - List of paths and/or directories containing firmware/NVSRAM files.
+            - All firmware/NVSRAM files that are not specified will be removed from the proxy if they exist.
+        type: list
+        required: false
+"""
+EXAMPLES = """
+- name: Ensure proxy has the expected firmware versions.
+  na_santricity_proxy_firmware_upload:
+    api_url: "https://192.168.1.100:8443/devmgr/v2"
+    api_username: "admin"
+    api_password: "adminpass"
+    validate_certs: true
+    firmware:
+      - "path/to/firmware/dlp_files"
+      - "path/to/nvsram.dlp"
+      - "path/to/firmware.dlp"
+"""
+RETURN = """
+msg:
+    description: Status and version of firmware and NVSRAM.
+    type: str
+    returned: always
+"""
+import os
+
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule, create_multipart_formdata, request
+
+
+class NetAppESeriesProxyFirmwareUpload(NetAppESeriesModule):
+    def __init__(self):
+        ansible_options = dict(firmware=dict(type="list", required=False))
+        super(NetAppESeriesProxyFirmwareUpload, self).__init__(ansible_options=ansible_options,
+                                                               web_services_version="02.00.0000.0000",
+                                                               supports_check_mode=True,
+                                                               proxy_specific_task=True)
+
+        args = self.module.params
+        self.firmware = args["firmware"]
+        self.files = None
+        self.add_files = []
+        self.remove_files = []
+        self.upload_failures = []
+
+    def determine_file_paths(self):
+        """Determine all the firmware/NVSRAM file paths."""
+        self.files = {}
+        if self.firmware:
+            for firmware_path in self.firmware:
+
+                if not os.path.exists(firmware_path):
+                    self.module.fail_json(msg="Firmware file does not exist! File [%s]" % firmware_path)
+                elif os.path.isdir(firmware_path):
+                    if not firmware_path.endswith("/"):
+                        firmware_path = firmware_path + "/"
+
+                    for dir_filename in os.listdir(firmware_path):
+                        if dir_filename.lower().endswith(".dlp"):
+                            self.files.update({dir_filename: firmware_path + dir_filename})
+                elif firmware_path.lower().endswith(".dlp"):
+                    name = os.path.basename(firmware_path)
+                    self.files.update({name: firmware_path})
+
+    def determine_changes(self):
+        """Determine whether files need to be added or removed."""
+        try:
+            rc, results = self.request("firmware/cfw-files")
+            current_files = [result["filename"] for result in results]
+
+            for current_file in current_files:
+                if current_file not in self.files.keys():
+                    self.remove_files.append(current_file)
+
+            for expected_file in self.files.keys():
+                if expected_file not in current_files:
+                    self.add_files.append(expected_file)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve current firmware file listing. Error [%s]" % error)
+
+    def upload_files(self):
+        """Upload firmware and NVSRAM files to the proxy."""
+        for filename in self.add_files:
+            fields = [("validate", "true")]
+            files = [("firmwareFile", filename, self.files[filename])]
+            headers, data = create_multipart_formdata(files=files, fields=fields)
+            try:
+                rc, response = self.request("firmware/upload/", method="POST", data=data, headers=headers)
+            except Exception as error:
+                self.upload_failures.append(filename)
+                self.module.warn("Failed to upload firmware file. File [%s]" % filename)
+
+    def delete_files(self):
+        """Remove firmware and NVSRAM files from the proxy."""
+        for filename in self.remove_files:
+            try:
+                rc, response = self.request("firmware/upload/%s" % filename, method="DELETE")
+            except Exception as error:
+                self.upload_failures.append(filename)
+                self.module.warn("Failed to delete firmware file. File [%s]" % filename)
+
+    def apply(self):
+        """Apply state to the web services proxy."""
+        change_required = False
+        if not self.is_proxy():
+            self.module.fail_json(msg="Module can only be executed against SANtricity Web Services Proxy.")
+
+        self.determine_file_paths()
+        self.determine_changes()
+        if self.add_files or self.remove_files:
+            change_required = True
+
+        if change_required and not self.module.check_mode:
+            self.upload_files()
+            self.delete_files()
+
+        if self.upload_failures:
+            self.module.fail_json(msg="Some files failed to be uploaded! changed=%s, Files_added [%s]. Files_removed [%s]. Upload_failures [%s]"
+                                      % (change_required, self.add_files, self.remove_files, self.upload_failures))
+        self.module.exit_json(changed=change_required, files_added=self.add_files, files_removed=self.remove_files)
+
+
+def main():
+    proxy_firmware_upload = NetAppESeriesProxyFirmwareUpload()
+    proxy_firmware_upload.apply()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_systems.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_systems.py
new file mode 100644
index 000000000..b572fe950
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_proxy_systems.py
@@ -0,0 +1,586 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_proxy_systems
+short_description: NetApp E-Series manage SANtricity web services proxy storage arrays
+description:
+    - Manage the arrays accessible via a NetApp Web Services Proxy for NetApp E-series storage arrays.
+author:
+    - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.santricity_proxy_doc
+options:
+    add_discovered_systems:
+        description:
+            - This flag will force all discovered storage systems to be added to SANtricity Web Services Proxy.
+        type: bool
+        required: false
+        default: false
+    systems:
+        description:
+            - List of storage system information which defines which systems should be added to SANtricity Web Services Proxy.
+            - Accepts a simple list of serial numbers or a list of dictionaries containing, at minimum, the serial or addresses key from the sub-option list.
+            - Note that the serial number will be used as the storage system identifier when an identifier is not specified.
+            - When I(add_discovered_systems) is false, any discovered system whose serial number was not supplied will be removed from the proxy.
+        type: list
+        required: false
+        default: []
+        suboptions:
+            ssid:
+                description:
+                    - This is the Web Services Proxy's identifier for a storage system.
+                    - When ssid is not specified then either the serial or first controller IPv4 address will be used instead.
+                type: str
+                required: false
+            serial:
+                description:
+                    - Storage system's serial number which can be located on the top of every NetApp E-Series enclosure.
+                    - Include any leading zeros.
+                    - Mutually exclusive with the sub-option addresses.
+                type: str
+                required: false
+            addresses:
+                description:
+                    - List of storage system's IPv4 addresses.
+                    - Mutually exclusive with the sub-option serial.
+                type: list
+                required: false
+            password:
+                description:
+                    - This is the storage system admin password.
+                    - When not provided the top-level I(password) will be used.
+                    - The storage system admin password will be set on the device itself with the provided admin password if it is not set.
+                type: str
+                required: false
+            tags:
+                description:
+                    - Optional meta tags to associate with the storage system.
+                type: dict
+                required: false
+    subnet_mask:
+        description:
+            - This is the IPv4 search range for discovering E-Series storage arrays.
+            - IPv4 subnet mask specified in CIDR form. Example 192.168.1.0/24 would search the range 192.168.1.0 to 192.168.1.255.
+            - Be sure to include all management paths in the search range.
+        type: str
+        required: false
+    password:
+        description:
+            - Default storage system password which will be used whenever a password has not been provided in the I(systems) sub-options.
+            - The storage system admin password will be set on the device itself with the provided admin password if it is not set.
+        type: str
+        required: false
+    tags:
+        description:
+            - Default meta tags to associate with all storage systems if not otherwise specified in I(systems) sub-options.
+        type: dict
+        required: false
+    accept_certificate:
+        description:
+            - Accept the storage system's certificate automatically even when it is self-signed.
+            - Use M(na_santricity_certificates) to add certificates to SANtricity Web Services Proxy.
+            - SANtricity Web Services Proxy will fail to add any untrusted storage system.
+        type: bool
+        required: false
+        default: true
+"""
+
+EXAMPLES = """
+---
+    - name: Add storage systems to SANtricity Web Services Proxy
+      na_santricity_proxy_systems:
+        api_url: "https://192.168.1.100:8443/devmgr/v2"
+        api_username: "admin"
+        api_password: "adminpass"
+        validate_certs: true
+        subnet_mask: 192.168.1.0/24
+        password: password
+        tags:
+          tag: value
+        accept_certificate: True
+        systems:
+          - ssid: "system1"
+            serial: "056233035640"
+            password: "asecretpassword"
+            tags:
+                use: corporate
+                location: sunnyvale
+          - ssid: "system2"
+            addresses:
+                - 192.168.1.100
+                - 192.168.2.100        # The second address is not required; it will be discovered.
+            password: "anothersecretpassword"
+          - serial: "021324673799"
+          - "021637323454"
+    - name: Add storage systems to SANtricity Web Services Proxy with a serial number list only. The serial numbers will be used to identify each system.
+      na_santricity_proxy_systems:
+        api_url: "https://192.168.1.100:8443/devmgr/v2"
+        api_username: "admin"
+        api_password: "adminpass"
+        validate_certs: true
+        subnet_mask: 192.168.1.0/24
+        password: password
+        accept_certificate: True
+        systems:
+          - "1144FG123018"
+          - "721716500123"
+          - "123540006043"
+          - "112123001239"
+    - name: Add all discovered storage systems to SANtricity Web Services Proxy found in the IP address range 192.168.1.0 to 192.168.1.255.
+      na_santricity_proxy_systems:
+        api_url: "https://192.168.1.100:8443/devmgr/v2"
+        api_username: "admin"
+        api_password: "adminpass"
+        validate_certs: true
+        add_discovered_systems: True
+        subnet_mask: 192.168.1.0/24
+        password: password
+        accept_certificate: True
+"""
+RETURN = """
+msg:
+    description: Description of actions performed.
+    type: str
+    returned: always
+    sample: "Storage systems [system1, system2, 1144FG123018, 721716500123, 123540006043, 112123001239] were added."
+""" +import json +import threading + +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule +from ansible.module_utils._text import to_native +from time import sleep + +try: + import ipaddress +except ImportError: + HAS_IPADDRESS = False +else: + HAS_IPADDRESS = True + + +class NetAppESeriesProxySystems(NetAppESeriesModule): + DEFAULT_CONNECTION_TIMEOUT_SEC = 30 + DEFAULT_GRAPH_DISCOVERY_TIMEOUT = 30 + DEFAULT_PASSWORD_STATE_TIMEOUT = 30 + DEFAULT_DISCOVERY_TIMEOUT_SEC = 300 + + def __init__(self): + ansible_options = dict(add_discovered_systems=dict(type="bool", required=False, default=False), + subnet_mask=dict(type="str", required=False), + password=dict(type="str", required=False, default="", no_log=True), + tags=dict(type="dict", required=False), + accept_certificate=dict(type="bool", required=False, default=True), + systems=dict(type="list", required=False, default=[], suboptions=dict(ssid=dict(type="str", required=False), + serial=dict(type="str", required=False), + addresses=dict(type="list", required=False), + password=dict(type="str", required=False, no_log=True), + tags=dict(type="dict", required=False)))) + + super(NetAppESeriesProxySystems, self).__init__(ansible_options=ansible_options, + web_services_version="04.10.0000.0000", + supports_check_mode=True, + proxy_specific_task=True) + args = self.module.params + self.add_discovered_systems = args["add_discovered_systems"] + self.subnet_mask = args["subnet_mask"] + self.accept_certificate = args["accept_certificate"] + self.default_password = args["password"] + + self.default_meta_tags = [] + if "tags" in args and args["tags"]: + for key in args["tags"].keys(): + if isinstance(args["tags"][key], list): + self.default_meta_tags.append({"key": key, "valueList": args["tags"][key]}) + else: + self.default_meta_tags.append({"key": key, "valueList": [args["tags"][key]]}) + self.default_meta_tags = sorted(self.default_meta_tags, key=lambda x: x["key"]) + + self.undiscovered_systems = [] + self.systems_to_remove = [] + self.systems_to_update = [] + self.systems_to_add = [] + + self.serial_numbers = [] + self.systems = [] + if args["systems"]: + for system in args["systems"]: + + if isinstance(system, str): # system is a serial number + self.serial_numbers.append(system) + self.systems.append({"ssid": system, + "serial": system, + "password": self.default_password, + "password_valid": None, + "password_set": None, + "stored_password_valid": None, + "meta_tags": self.default_meta_tags, + "controller_addresses": [], + "embedded_available": None, + "accept_certificate": False, + "current_info": {}, + "changes": {}, + "updated_required": False, + "failed": False, + "discovered": False}) + elif isinstance(system, dict): # system is a dictionary of system details + if "ssid" not in system: + if "serial" in system and system["serial"]: + system.update({"ssid": system["serial"]}) + elif "addresses" in system and system["addresses"]: + system.update({"ssid": system["addresses"][0]}) + if "password" not in system: + system.update({"password": self.default_password}) + + if "serial" in system and system["serial"]: + self.serial_numbers.append(system["serial"]) + + # Structure meta tags for Web Services + meta_tags = self.default_meta_tags + if "meta_tags" in system and system["meta_tags"]: + for key in system["meta_tags"].keys(): + if isinstance(system["meta_tags"][key], list): + meta_tags.append({"key": key, "valueList": system["meta_tags"][key]}) + else: + meta_tags.append({"key": key, 
"valueList": [system["meta_tags"][key]]}) + meta_tags = sorted(meta_tags, key=lambda x: x["key"]) + + self.systems.append({"ssid": str(system["ssid"]), + "serial": system["serial"] if "serial" in system else "", + "password": system["password"], + "password_valid": None, + "password_set": None, + "stored_password_valid": None, + "meta_tags": meta_tags, + "controller_addresses": system["addresses"] if "addresses" in system else [], + "embedded_available": None, + "accept_certificate": False, + "current_info": {}, + "changes": {}, + "updated_required": False, + "failed": False, + "discovered": False}) + else: + self.module.fail_json(msg="Invalid system! All systems must either be a simple serial number or a dictionary. Failed system: %s" % system) + + # Update default request headers + self.DEFAULT_HEADERS.update({"x-netapp-password-validate-method": "none"}) + + def discover_array(self): + """Search for array using the world wide identifier.""" + subnet = ipaddress.ip_network(u"%s" % self.subnet_mask) + + try: + rc, request_id = self.request("discovery", method="POST", data={"startIP": str(subnet[0]), "endIP": str(subnet[-1]), + "connectionTimeout": self.DEFAULT_CONNECTION_TIMEOUT_SEC}) + + # Wait for discover to complete + discovered_systems = None + try: + for iteration in range(self.DEFAULT_DISCOVERY_TIMEOUT_SEC): + rc, discovered_systems = self.request("discovery?requestId=%s" % request_id["requestId"]) + if not discovered_systems["discoverProcessRunning"]: + break + sleep(1) + else: + self.module.fail_json(msg="Timeout waiting for array discovery process. Subnet [%s]" % self.subnet_mask) + except Exception as error: + self.module.fail_json(msg="Failed to get the discovery results. Error [%s]." % to_native(error)) + + if not discovered_systems: + self.module.warn("Discovery found no systems. IP starting address [%s]. IP ending address: [%s]." % (str(subnet[0]), str(subnet[-1]))) + else: + # Add all newly discovered systems. This is ignore any supplied systems to prevent any duplicates. + if self.add_discovered_systems: + for discovered_system in discovered_systems["storageSystems"]: + if discovered_system["serialNumber"] not in self.serial_numbers: + self.systems.append({"ssid": discovered_system["serialNumber"], + "serial": discovered_system["serialNumber"], + "password": self.default_password, + "password_valid": None, + "password_set": None, + "stored_password_valid": None, + "meta_tags": self.default_meta_tags, + "controller_addresses": [], + "embedded_available": None, + "accept_certificate": False, + "current_info": {}, + "changes": {}, + "updated_required": False, + "failed": False, + "discovered": False}) + + # Update controller_addresses + for system in self.systems: + for discovered_system in discovered_systems["storageSystems"]: + if (system["serial"] == discovered_system["serialNumber"] or + (system["controller_addresses"] and + all([address in discovered_system["ipAddresses"] for address in system["controller_addresses"]]))): + system["controller_addresses"] = sorted(discovered_system["ipAddresses"]) + system["embedded_available"] = "https" in discovered_system["supportedManagementPorts"] + system["accept_certificate"] = system["embedded_available"] and self.accept_certificate + system["discovered"] = True + break + else: # Remove any undiscovered system from the systems list + + self.undiscovered_systems.append(system["ssid"]) + # self.systems.remove(system) + + except Exception as error: + self.module.fail_json(msg="Failed to initiate array discovery. Error [%s]." 
% to_native(error)) + + def update_storage_systems_info(self): + """Get current web services proxy storage systems.""" + try: + rc, existing_systems = self.request("storage-systems") + + # Mark systems for adding or removing + for system in self.systems: + for existing_system in existing_systems: + if system["ssid"] == existing_system["id"]: + system["current_info"] = existing_system + + if system["current_info"]["passwordStatus"] in ["unknown", "securityLockout"]: + system["failed"] = True + self.module.warn("Skipping storage system [%s] because of current password status [%s]" + % (system["ssid"], system["current_info"]["passwordStatus"])) + if system["current_info"]["metaTags"]: + system["current_info"]["metaTags"] = sorted(system["current_info"]["metaTags"], key=lambda x: x["key"]) + break + else: + self.systems_to_add.append(system) + + # Mark systems for removing + for existing_system in existing_systems: + for system in self.systems: + if existing_system["id"] == system["ssid"]: + + # Leave existing but undiscovered storage systems alone and throw a warning. + if existing_system["id"] in self.undiscovered_systems: + self.undiscovered_systems.remove(existing_system["id"]) + self.module.warn("Expected storage system exists on the proxy but was failed to be discovered. Array [%s]." % existing_system["id"]) + break + else: + self.systems_to_remove.append(existing_system["id"]) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve storage systems. Error [%s]." % to_native(error)) + + def set_password(self, system): + """Determine whether password has been set and, if it hasn't been set, set it.""" + if system["embedded_available"] and system["controller_addresses"]: + for url in ["https://%s:8443/devmgr" % system["controller_addresses"][0], + "https://%s:443/devmgr" % system["controller_addresses"][0], + "http://%s:8080/devmgr" % system["controller_addresses"][0]]: + try: + rc, response = self._request("%s/utils/login?uid=admin&xsrf=false&onlycheck=true" % url, ignore_errors=True, url_username="admin", + url_password="", validate_certs=False) + + if rc == 200: # successful login without password + system["password_set"] = False + if system["password"]: + try: + rc, storage_system = self._request("%s/v2/storage-systems/1/passwords" % url, method="POST", url_username="admin", + headers=self.DEFAULT_HEADERS, url_password="", validate_certs=False, + data=json.dumps({"currentAdminPassword": "", "adminPassword": True, + "newPassword": system["password"]})) + + except Exception as error: + system["failed"] = True + self.module.warn("Failed to set storage system password. Array [%s]." % system["ssid"]) + break + + elif rc == 401: # unauthorized + system["password_set"] = True + break + except Exception as error: + pass + else: + self.module.warn("Failed to retrieve array password state. Array [%s]." 
% system["ssid"]) + system["failed"] = True + + def update_system_changes(self, system): + """Determine whether storage system configuration changes are required """ + if system["current_info"]: + system["changes"] = dict() + + # Check if management paths should be updated + if (sorted(system["controller_addresses"]) != sorted(system["current_info"]["managementPaths"]) or + system["current_info"]["ip1"] not in system["current_info"]["managementPaths"] or + system["current_info"]["ip2"] not in system["current_info"]["managementPaths"]): + system["changes"].update({"controllerAddresses": system["controller_addresses"]}) + + # Check for expected meta tag count + if len(system["meta_tags"]) != len(system["current_info"]["metaTags"]): + if len(system["meta_tags"]) == 0: + system["changes"].update({"removeAllTags": True}) + else: + system["changes"].update({"metaTags": system["meta_tags"]}) + + # Check for expected meta tag key-values + else: + for index in range(len(system["meta_tags"])): + if (system["current_info"]["metaTags"][index]["key"] != system["meta_tags"][index]["key"] or + sorted(system["current_info"]["metaTags"][index]["valueList"]) != sorted(system["meta_tags"][index]["valueList"])): + system["changes"].update({"metaTags": system["meta_tags"]}) + break + + # Check whether CA certificate should be accepted + if system["accept_certificate"] and not all([controller["certificateStatus"] == "trusted" for controller in system["current_info"]["controllers"]]): + system["changes"].update({"acceptCertificate": True}) + + if system["id"] not in self.undiscovered_systems and system["changes"]: + self.systems_to_update.append(system) + + def add_system(self, system): + """Add basic storage system definition to the web services proxy.""" + self.set_password(system) + + body = {"id": system["ssid"], + "controllerAddresses": system["controller_addresses"], + "password": system["password"]} + if system["accept_certificate"]: # Set only if embedded is available and accept_certificates==True + body.update({"acceptCertificate": system["accept_certificate"]}) + if system["meta_tags"]: + body.update({"metaTags": system["meta_tags"]}) + + try: + rc, storage_system = self.request("storage-systems", method="POST", data=body) + except Exception as error: + self.module.warn("Failed to add storage system. Array [%s]. Error [%s]" % (system["ssid"], to_native(error))) + return # Skip the password validation. + + # Ensure the password is validated + for retries in range(5): + sleep(1) + try: + rc, storage_system = self.request("storage-systems/%s/validatePassword" % system["ssid"], method="POST") + break + except Exception as error: + continue + else: + self.module.warn("Failed to validate password status. Array [%s]. Error [%s]" % (system["ssid"], to_native(error))) + + def update_system(self, system): + """Update storage system configuration.""" + try: + rc, storage_system = self.request("storage-systems/%s" % system["ssid"], method="POST", data=system["changes"]) + except Exception as error: + self.module.warn("Failed to update storage system. Array [%s]. Error [%s]" % (system["ssid"], to_native(error))) + + def remove_system(self, ssid): + """Remove storage system.""" + try: + rc, storage_system = self.request("storage-systems/%s" % ssid, method="DELETE") + except Exception as error: + self.module.warn("Failed to remove storage system. Array [%s]. Error [%s]." 
+                             % (ssid, to_native(error)))
+
+    def apply(self):
+        """Determine whether changes are required and, if necessary, apply them."""
+        missing_packages = []
+        if not HAS_IPADDRESS:
+            missing_packages.append("ipaddress")
+
+        if missing_packages:
+            self.module.fail_json(msg="Python packages are missing! Packages [%s]." % ", ".join(missing_packages))
+
+        if self.is_embedded():
+            self.module.fail_json(msg="Cannot add/remove storage systems to a SANtricity Web Services Embedded instance.")
+
+        if self.add_discovered_systems or self.systems:
+            if self.subnet_mask:
+                self.discover_array()
+            self.update_storage_systems_info()
+
+            # Determine whether the storage systems require updating
+            thread_pool = []
+            for system in self.systems:
+                if not system["failed"]:
+                    thread = threading.Thread(target=self.update_system_changes, args=(system,))
+                    thread_pool.append(thread)
+                    thread.start()
+            for thread in thread_pool:
+                thread.join()
+        else:
+            self.update_storage_systems_info()
+
+        changes_required = False
+        if self.systems_to_add or self.systems_to_update or self.systems_to_remove:
+            changes_required = True
+
+        if changes_required and not self.module.check_mode:
+            add_msg = ""
+            update_msg = ""
+            remove_msg = ""
+
+            # Remove storage systems
+            if self.systems_to_remove:
+                ssids = []
+                thread_pool = []
+                for ssid in self.systems_to_remove:
+                    thread = threading.Thread(target=self.remove_system, args=(ssid,))
+                    thread_pool.append(thread)
+                    thread.start()
+                    ssids.append(ssid)
+                for thread in thread_pool:
+                    thread.join()
+                if ssids:
+                    remove_msg = "system%s removed: %s" % ("s" if len(ssids) > 1 else "", ", ".join(ssids))
+
+            thread_pool = []
+
+            # Add storage systems
+            if self.systems_to_add:
+                ssids = []
+                for system in self.systems_to_add:
+                    if not system["failed"]:
+                        thread = threading.Thread(target=self.add_system, args=(system,))
+                        thread_pool.append(thread)
+                        thread.start()
+                        ssids.append(system["ssid"])
+                if ssids:
+                    add_msg = "system%s added: %s" % ("s" if len(ssids) > 1 else "", ", ".join(ssids))
+
+            # Update storage systems
+            if self.systems_to_update:
+                ssids = []
+                for system in self.systems_to_update:
+                    if not system["failed"]:
+                        thread = threading.Thread(target=self.update_system, args=(system,))
+                        thread_pool.append(thread)
+                        thread.start()
+                        ssids.append(system["ssid"])
+                if ssids:
+                    update_msg = "system%s updated: %s" % ("s" if len(ssids) > 1 else "", ", ".join(ssids))
+
+            # Wait for storage systems to be added or updated
+            for thread in thread_pool:
+                thread.join()
+
+            # Report module actions
+            if self.undiscovered_systems:
+                undiscovered_msg = "system%s undiscovered: %s" % ("s" if len(self.undiscovered_systems) > 1 else "", ", ".join(self.undiscovered_systems))
+                self.module.fail_json(msg=(", ".join([msg for msg in [add_msg, update_msg, remove_msg, undiscovered_msg] if msg])), changed=changes_required)
+
+            self.module.exit_json(msg=", ".join([msg for msg in [add_msg, update_msg, remove_msg] if msg]), changed=changes_required)
+
+        # Report no changes
+        if self.undiscovered_systems:
+            self.module.fail_json(msg="No changes were made; however the following system(s) failed to be discovered: %s."
+ % self.undiscovered_systems, changed=changes_required) + self.module.exit_json(msg="No changes were made.", changed=changes_required) + + +def main(): + proxy_systems = NetAppESeriesProxySystems() + proxy_systems.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_server_certificate.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_server_certificate.py new file mode 100644 index 000000000..909819ce2 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_server_certificate.py @@ -0,0 +1,539 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +module: na_santricity_server_certificate +short_description: NetApp E-Series manage the storage system's server SSL certificates. +description: Manage NetApp E-Series storage system's server SSL certificates. +author: Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.santricity_doc +options: + controller: + description: + - The controller that owns the port you want to configure. + - Controller names are represented alphabetically, with the first controller as A, the second as B, and so on. + - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard limitation and could change in the future. + - I(controller) must be specified unless managing SANtricity Web Services Proxy (ie I(ssid="proxy")) + choices: + - A + - B + type: str + required: false + certificates: + description: + - Unordered list of all server certificate files which include PEM and DER encoded certificates as well as private keys. + - When I(certificates) is not defined then a self-signed certificate will be expected. + type: list + required: false + passphrase: + description: + - Passphrase for PEM encoded private key encryption. + - If I(passphrase) is not supplied then Ansible will prompt for private key certificate. + type: str + required: false +notes: + - Set I(ssid=='0') or I(ssid=='proxy') to specifically reference SANtricity Web Services Proxy. + - Certificates can be the following filetypes - PEM (.pem, .crt, .cer, or .key) or DER (.der or .cer) + - When I(certificates) is not defined then a self-signed certificate will be expected. +requirements: + - cryptography +""" +EXAMPLES = """ +- name: Ensure signed certificate is installed. + na_santricity_server_certificate: + ssid: 1 + api_url: https://192.168.1.100:8443/devmgr/v2 + api_username: admin + api_password: adminpass + controller: A + certificates: + - 'root_auth_cert.pem' + - 'intermediate_auth1_cert.pem' + - 'intermediate_auth2_cert.pem' + - 'public_cert.pem' + - 'private_key.pem' + passphrase: keypass +- name: Ensure signed certificate bundle is installed. + na_santricity_server_certificate: + ssid: 1 + api_url: https://192.168.1.100:8443/devmgr/v2 + api_username: admin + api_password: adminpass + controller: B + certificates: + - 'cert_bundle.pem' + passphrase: keypass +- name: Ensure storage system generated self-signed certificate is installed. 
+  na_santricity_server_certificate:
+    ssid: 1
+    api_url: https://192.168.1.100:8443/devmgr/v2
+    api_username: admin
+    api_password: adminpass
+    controller: A
+"""
+RETURN = """
+changed:
+    description: Whether changes have been made.
+    type: bool
+    returned: always
+    sample: true
+signed_server_certificate:
+    description: Whether the public server certificate is signed.
+    type: bool
+    returned: always
+    sample: true
+added_certificates:
+    description: Any SSL certificates that were added.
+    type: list
+    returned: always
+    sample: ['added_certificate.crt']
+removed_certificates:
+    description: Any SSL certificates that were removed.
+    type: list
+    returned: always
+    sample: ['removed_certificate.crt']
+"""
+
+import binascii
+import random
+import re
+
+from ansible.module_utils import six
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native
+from time import sleep
+
+try:
+    import cryptography
+    from cryptography import x509
+    from cryptography.hazmat.primitives import serialization
+    from cryptography.hazmat.backends import default_backend
+except ImportError:
+    HAS_CRYPTOGRAPHY = False
+else:
+    HAS_CRYPTOGRAPHY = True
+
+
+def create_multipart_formdata(file_details):
+    """Create the data for a multipart/form request for a certificate."""
+    boundary = "---------------------------" + "".join([str(random.randint(0, 9)) for x in range(30)])
+    data_parts = list()
+    data = None
+
+    if six.PY2:  # Generate payload for Python 2
+        newline = "\r\n"
+        for name, filename, content in file_details:
+            data_parts.extend(["--%s" % boundary,
+                               'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
+                               "Content-Type: application/octet-stream",
+                               "",
+                               content])
+        data_parts.extend(["--%s--" % boundary, ""])
+        data = newline.join(data_parts)
+
+    else:
+        newline = six.b("\r\n")
+        for name, filename, content in file_details:
+            data_parts.extend([six.b("--%s" % boundary),
+                               six.b('Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename)),
+                               six.b("Content-Type: application/octet-stream"),
+                               six.b(""),
+                               content])
+        data_parts.extend([six.b("--%s--" % boundary), b""])
+        data = newline.join(data_parts)
+
+    headers = {
+        "Content-Type": "multipart/form-data; boundary=%s" % boundary,
+        "Content-Length": str(len(data))}
+
+    return headers, data
+
+
+class NetAppESeriesServerCertificate(NetAppESeriesModule):
+    RESET_SSL_CONFIG_TIMEOUT_SEC = 3 * 60
+
+    def __init__(self):
+        ansible_options = dict(controller=dict(type="str", required=False, choices=["A", "B"]),
+                               certificates=dict(type="list", required=False),
+                               passphrase=dict(type="str", required=False, no_log=True))
+
+        super(NetAppESeriesServerCertificate, self).__init__(ansible_options=ansible_options,
+                                                             web_services_version="05.00.0000.0000",
+                                                             supports_check_mode=True)
+        args = self.module.params
+        self.controller = args["controller"]
+        self.certificates = args["certificates"] if "certificates" in args.keys() else list()
+        self.passphrase = args["passphrase"] if "passphrase" in args.keys() else None
+
+        # Check whether request needs to be forwarded on to the controller web services rest api.
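+        # Illustrative routing sketch (hypothetical ssid/controller values, derived from the logic below):
+        #   ssid="proxy" or "0"       -> prefix=""                                        suffix="?controller=auto"
+        #   ssid="10", controller="A" -> prefix="storage-systems/10/forward/devmgr/v2/"   suffix="?controller=a"
+        # so a call such as self.request(self.url_path_prefix + "certificates/server" + self.url_path_suffix)
+        # is either answered by the proxy itself or forwarded to the embedded web services of the chosen controller.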
+        self.url_path_prefix = ""
+        self.url_path_suffix = ""
+        if self.is_proxy():
+            if self.ssid.lower() in ["0", "proxy"]:
+                self.url_path_suffix = "?controller=auto"
+            elif self.controller is not None:
+                self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid
+                self.url_path_suffix = "?controller=%s" % self.controller.lower()
+            else:
+                self.module.fail_json(msg="Invalid options! You must specify which controller's certificates to modify. Array [%s]." % self.ssid)
+        elif self.controller is None:
+            self.module.fail_json(msg="Invalid options! You must specify which controller's certificates to modify. Array [%s]." % self.ssid)
+
+        self.cache_get_current_certificates = None
+        self.cache_is_controller_alternate = None
+        self.cache_is_public_server_certificate_signed = None
+
+    def get_controllers(self):
+        """Retrieve a mapping of controller labels to their controller slot."""
+        controllers_dict = {}
+        controllers = []
+        try:
+            rc, controllers = self.request("storage-systems/%s/controllers" % self.ssid)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve the controller settings. Array Id [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+        for controller in controllers:
+            slot = controller['physicalLocation']['slot']
+            letter = chr(slot + 64)
+            controllers_dict.update({letter: slot})
+
+        return controllers_dict
+
+    def check_controller(self):
+        """Determine whether the affected controller is the alternate controller."""
+        controllers_info = self.get_controllers()
+        try:
+            rc, about = self.request("utils/about", rest_api_path=self.DEFAULT_BASE_PATH)
+            self.url_path_suffix = "?alternate=%s" % ("true" if controllers_info[self.controller] != about["controllerPosition"] else "false")
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve the accessing controller's slot information. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+    @staticmethod
+    def sanitize_distinguished_name(dn):
+        """Generate a sorted distinguished name string to account for different formats/orders."""
+        dn = re.sub(" *= *", "=", dn).lower()
+        dn = re.sub(", *(?=[a-zA-Z]+={1})", "---SPLIT_MARK---", dn)
+        dn_parts = dn.split("---SPLIT_MARK---")
+        dn_parts.sort()
+        return ",".join(dn_parts)
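+    # Illustrative behavior of sanitize_distinguished_name (hypothetical DN): both
+    # "CN = Example CA, O=Example, C=US" and "C=US,O=Example,CN=Example CA" normalize to
+    # "c=us,cn=example ca,o=example", so subject/issuer comparisons ignore case, spacing, and RDN order.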
+    def certificate_info_from_file(self, path):
+        """Determine the certificate info from the provided filepath."""
+        certificates_info = {}
+        try:
+            # Treat file as PEM encoded file.
+            with open(path, "r") as fh:
+                line = fh.readline()
+                while line != "":
+
+                    # Add public certificates to bundle_info.
+                    if re.search("^-+BEGIN CERTIFICATE-+$", line):
+                        certificate = line
+                        line = fh.readline()
+                        while not re.search("^-+END CERTIFICATE-+$", line):
+                            if line == "":
+                                self.module.fail_json(msg="Invalid certificate! Path [%s]. Array [%s]." % (path, self.ssid))
+                            certificate += line
+                            line = fh.readline()
+                        certificate += line
+                        if not six.PY2:
+                            certificate = six.b(certificate)
+                        info = x509.load_pem_x509_certificate(certificate, default_backend())
+                        certificates_info.update(self.certificate_info(info, certificate, path))
+
+                    # Add private key to self.private_key.
+                    elif re.search("^-+BEGIN.*PRIVATE KEY-+$", line):
+                        pkcs8 = "BEGIN PRIVATE KEY" in line
+                        pkcs8_encrypted = "BEGIN ENCRYPTED PRIVATE KEY" in line
+                        key = line
+                        line = fh.readline()
+                        while not re.search("^-+END.*PRIVATE KEY-+$", line):
+                            if line == "":
+                                self.module.fail_json(msg="Invalid certificate! Array [%s]." % self.ssid)
+                            key += line
+                            line = fh.readline()
+                        key += line
+                        if not six.PY2:
+                            key = six.b(key)
+                            if self.passphrase:
+                                self.passphrase = six.b(self.passphrase)
+
+                        # Check for PKCS8 PEM encoding.
+                        if pkcs8 or pkcs8_encrypted:
+                            try:
+                                if pkcs8:
+                                    crypto_key = serialization.load_pem_private_key(key, password=None, backend=default_backend())
+                                else:
+                                    crypto_key = serialization.load_pem_private_key(key, password=self.passphrase, backend=default_backend())
+                            except ValueError as error:
+                                self.module.fail_json(msg="Failed to load%sPKCS8 encoded private key. %s"
+                                                          " Error [%s]." % (" encrypted " if pkcs8_encrypted else " ",
+                                                                            "Check passphrase." if pkcs8_encrypted else "", error))
+
+                            key = crypto_key.private_bytes(encoding=serialization.Encoding.PEM,
+                                                           format=serialization.PrivateFormat.TraditionalOpenSSL,
+                                                           encryption_algorithm=serialization.NoEncryption())
+
+                        # Check whether multiple private keys have been provided and fail if different
+                        if "private_key" in certificates_info.keys() and certificates_info["private_key"] != key:
+                            self.module.fail_json(msg="Multiple private keys have been provided! Array [%s]" % self.ssid)
+                        else:
+                            certificates_info.update({"private_key": key})
+
+                    line = fh.readline()
+
+                # Throw exception when no PEM certificates have been discovered.
+                if len(certificates_info) == 0:
+                    raise Exception("Failed to discover a valid PEM encoded certificate or private key!")
+
+        except Exception as error:
+            # Treat file as DER encoded certificate
+            try:
+                with open(path, "rb") as fh:
+                    cert_info = x509.load_der_x509_certificate(fh.read(), default_backend())
+                    cert_data = cert_info.public_bytes(serialization.Encoding.PEM)
+                    certificates_info.update(self.certificate_info(cert_info, cert_data, path))
+
+                # Throw exception when no DER encoded certificates have been discovered.
+                if len(certificates_info) == 0:
+                    raise Exception("Failed to discover a valid DER encoded certificate!")
+            except Exception as error:
+
+                # Treat file as DER encoded private key
+                try:
+                    with open(path, "rb") as fh:
+                        crypto_key = serialization.load_der_private_key(fh.read(), password=None, backend=default_backend())
+                        key = crypto_key.private_bytes(encoding=serialization.Encoding.PEM,
+                                                       format=serialization.PrivateFormat.TraditionalOpenSSL,
+                                                       encryption_algorithm=serialization.NoEncryption())
+                        certificates_info.update({"private_key": key})
+                except Exception as error:
+                    self.module.fail_json(msg="Invalid file type! File is neither a PEM nor DER encoded certificate/private key."
+                                              " Path [%s]. Array [%s]. Error [%s]." % (path, self.ssid, to_native(error)))
+
+        return certificates_info
+
+    def certificate_info(self, info, data, path):
+        """Return certificate information, keyed by sanitized subject, for a DER- or PEM-encoded x509 certificate."""
+        fingerprint = binascii.hexlify(info.fingerprint(info.signature_hash_algorithm)).decode("utf-8")
+        return {self.sanitize_distinguished_name(info.subject.rfc4514_string()): {"alias": fingerprint, "fingerprint": fingerprint,
+                                                                                  "certificate": data, "path": path,
+                                                                                  "issuer": self.sanitize_distinguished_name(info.issuer.rfc4514_string())}}
+
+    def get_current_certificates(self):
+        """Determine the server certificates that exist on the storage system."""
+        if self.cache_get_current_certificates is None:
+            current_certificates = []
+            try:
+                rc, current_certificates = self.request(self.url_path_prefix + "certificates/server%s" % self.url_path_suffix)
+            except Exception as error:
+                self.module.fail_json(msg="Failed to retrieve server certificates. Array [%s]."
% self.ssid)
+
+            self.cache_get_current_certificates = {}
+            for certificate in current_certificates:
+                certificate.update({"issuer": self.sanitize_distinguished_name(certificate["issuerDN"])})
+                self.cache_get_current_certificates.update({self.sanitize_distinguished_name(certificate["subjectDN"]): certificate})
+
+        return self.cache_get_current_certificates
+
+    def is_public_server_certificate_signed(self):
+        """Return whether the public server certificate is signed."""
+        if self.cache_is_public_server_certificate_signed is None:
+            current_certificates = self.get_current_certificates()
+
+            for certificate in current_certificates:
+                if current_certificates[certificate]["alias"] == "jetty":
+                    self.cache_is_public_server_certificate_signed = current_certificates[certificate]["type"] == "caSigned"
+                    break
+
+        return self.cache_is_public_server_certificate_signed
+
+    def get_expected_certificates(self):
+        """Determine affected certificates and return the certificate list in the required submission order."""
+        certificates_info = {}
+        existing_certificates = self.get_current_certificates()
+
+        private_key = None
+        if self.certificates:
+            for path in self.certificates:
+                info = self.certificate_info_from_file(path)
+                if "private_key" in info.keys():
+                    if private_key is not None and info["private_key"] != private_key:
+                        self.module.fail_json(msg="Multiple private keys have been provided! Array [%s]" % self.ssid)
+                    else:
+                        private_key = info.pop("private_key")
+                certificates_info.update(info)
+
+        # Determine bundle certificate ordering.
+        ordered_certificates_info = [dict] * len(certificates_info)
+        ordered_certificates_info_index = len(certificates_info) - 1
+        while certificates_info:
+            for certificate_subject in certificates_info.keys():
+
+                # Determine all remaining issuers.
+                remaining_issuer_list = [info["issuer"] for subject, info in existing_certificates.items()]
+                for subject, info in certificates_info.items():
+                    remaining_issuer_list.append(info["issuer"])
+
+                # Search for the next certificate that is not an issuer of the remaining certificates in certificates_info dictionary.
+                if certificate_subject not in remaining_issuer_list:
+                    ordered_certificates_info[ordered_certificates_info_index] = certificates_info[certificate_subject]
+                    certificates_info.pop(certificate_subject)
+                    ordered_certificates_info_index -= 1
+                    break
+            else:  # Add remaining root certificate if one exists.
+                for certificate_subject in certificates_info.keys():
+                    ordered_certificates_info[ordered_certificates_info_index] = certificates_info[certificate_subject]
+                    certificates_info.pop(certificate_subject)
+                    ordered_certificates_info_index -= 1
+                    break
+        return {"private_key": private_key, "certificates": ordered_certificates_info}
+
+    def determine_changes(self):
+        """Determine certificates that need to be added or removed from storage system's server certificates database."""
+        if not self.is_proxy():
+            self.check_controller()
+        existing_certificates = self.get_current_certificates()
+        expected = self.get_expected_certificates()
+        certificates = expected["certificates"]
+
+        changes = {"change_required": False,
+                   "signed_cert": True if certificates else False,
+                   "private_key": expected["private_key"],
+                   "public_cert": None,
+                   "add_certs": [],
+                   "remove_certs": []}
+
+        # Determine whether any expected certificates are missing from the storage system's database.
+        if certificates:
+
+            # Create an initial remove_cert list.
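+            # Reconciliation sketch: every installed certificate alias is first marked for removal; the loop
+            # below then un-marks each alias whose fingerprint still matches an expected certificate, so any
+            # alias left in remove_certs afterwards is stale and will be deleted.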
+ for existing_certificate_subject, existing_certificate in existing_certificates.items(): + changes["remove_certs"].append(existing_certificate["alias"]) + + # Determine expected certificates + last_certificate_index = len(certificates) - 1 + for certificate_index, certificate in enumerate(certificates): + for existing_certificate_subject, existing_certificate in existing_certificates.items(): + + if certificate_index == last_certificate_index: + if existing_certificate["alias"] == "jetty": + if (certificate["fingerprint"] != existing_certificate["shaFingerprint"] and + certificate["fingerprint"] != existing_certificate["sha256Fingerprint"]): + changes["change_required"] = True + changes["public_cert"] = certificate + changes["remove_certs"].remove(existing_certificate["alias"]) + break + + elif certificate["alias"] == existing_certificate["alias"]: + if (certificate["fingerprint"] != existing_certificate["shaFingerprint"] and + certificate["fingerprint"] != existing_certificate["sha256Fingerprint"]): + changes["add_certs"].append(certificate) + changes["change_required"] = True + changes["remove_certs"].remove(existing_certificate["alias"]) + break + + else: + changes["add_certs"].append(certificate) + changes["change_required"] = True + + # Determine whether new self-signed certificate needs to be generated. + elif self.is_public_server_certificate_signed(): + changes["change_required"] = True + + return changes + + def apply_self_signed_certificate(self): + """Install self-signed server certificate which is generated by the storage system itself.""" + try: + rc, resp = self.request(self.url_path_prefix + "certificates/reset%s" % self.url_path_suffix, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to reset SSL configuration back to a self-signed certificate! Array [%s]. Error [%s]." % (self.ssid, error)) + + def apply_signed_certificate(self, public_cert, private_key): + """Install authoritative signed server certificate whether csr is generated by storage system or not.""" + if private_key is None: + headers, data = create_multipart_formdata([("file", "signed_server_certificate", public_cert["certificate"])]) + else: + headers, data = create_multipart_formdata([("file", "signed_server_certificate", public_cert["certificate"]), + ("privateKey", "private_key", private_key)]) + + try: + rc, resp = self.request(self.url_path_prefix + "certificates/server%s&replaceMainServerCertificate=true" % self.url_path_suffix, + method="POST", headers=headers, data=data) + except Exception as error: + self.module.fail_json(msg="Failed to upload signed server certificate! Array [%s]. Error [%s]." % (self.ssid, error)) + + def upload_authoritative_certificates(self, certificate): + """Install all authoritative certificates.""" + headers, data = create_multipart_formdata([["file", certificate["alias"], certificate["certificate"]]]) + + try: + rc, resp = self.request(self.url_path_prefix + "certificates/server%s&alias=%s" % (self.url_path_suffix, certificate["alias"]), + method="POST", headers=headers, data=data) + except Exception as error: + self.module.fail_json(msg="Failed to upload certificate authority! Array [%s]. Error [%s]." 
% (self.ssid, error)) + + def remove_authoritative_certificates(self, alias): + """Delete all authoritative certificates.""" + try: + rc, resp = self.request(self.url_path_prefix + "certificates/server/%s%s" % (alias, self.url_path_suffix), method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to delete certificate authority! Array [%s]. Error [%s]." % (self.ssid, error)) + + def reload_ssl_configuration(self): + """Asynchronously reloads the SSL configuration.""" + self.request(self.url_path_prefix + "certificates/reload%s" % self.url_path_suffix, method="POST", ignore_errors=True) + + for retry in range(int(self.RESET_SSL_CONFIG_TIMEOUT_SEC / 3)): + try: + rc, current_certificates = self.request(self.url_path_prefix + "certificates/server%s" % self.url_path_suffix) + except Exception as error: + sleep(3) + continue + break + else: + self.module.fail_json(msg="Failed to retrieve server certificates. Array [%s]." % self.ssid) + + def apply(self): + """Apply state changes to the storage array's truststore.""" + if not HAS_CRYPTOGRAPHY: + self.module.fail_json(msg="Python cryptography package are missing!") + + major, minor, patch = [int(item) for item in str(cryptography.__version__).split(".")] + if major < 2 or (major == 2 and minor < 5): + self.module.fail_json(msg="Python cryptography package version must greater than version 2.5! Version [%s]." % cryptography.__version__) + + changes = self.determine_changes() + if changes["change_required"] and not self.module.check_mode: + + if changes["signed_cert"]: + for certificate in changes["add_certs"]: + self.upload_authoritative_certificates(certificate) + for certificate_alias in changes["remove_certs"]: + self.remove_authoritative_certificates(certificate_alias) + if changes["public_cert"]: + self.apply_signed_certificate(changes["public_cert"], changes["private_key"]) + self.reload_ssl_configuration() + else: + self.apply_self_signed_certificate() + self.reload_ssl_configuration() + + self.module.exit_json(changed=changes["change_required"], + signed_server_certificate=changes["signed_cert"], + added_certificates=[cert["alias"] for cert in changes["add_certs"]], + removed_certificates=changes["remove_certs"]) + + +def main(): + client_certs = NetAppESeriesServerCertificate() + client_certs.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_snapshot.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_snapshot.py new file mode 100644 index 000000000..67356c9dc --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_snapshot.py @@ -0,0 +1,1578 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +DOCUMENTATION = """ +module: na_santricity_snapshot +short_description: NetApp E-Series storage system's snapshots. +description: Manage NetApp E-Series manage the storage system's snapshots. +author: Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.santricity_doc +options: + state: + description: + - When I(state==absent) ensures the I(type) has been removed. + - When I(state==present) ensures the I(type) is available. 
+ - When I(state==rollback) the consistency group will be rolled back to the point-in-time snapshot images selected by I(pit_name or pit_timestamp). + - I(state==rollback) will always return changed since it is not possible to evaluate the current state of the base volume in relation to a snapshot image. + type: str + choices: + - absent + - present + - rollback + default: present + required: false + type: + description: + - Type of snapshot object to effect. + - Group indicates a snapshot consistency group; consistency groups may have one or more base volume members which are defined in I(volumes). + - Pit indicates a snapshot consistency group point-in-time image(s); a snapshot image will be taken of each base volume when I(state==present). + - Warning! When I(state==absent and type==pit), I(pit_name) or I(pit_timestamp) must be defined and all point-in-time images created prior to the + selection will also be deleted. + - View indicates a consistency group snapshot volume of particular point-in-time image(s); snapshot volumes will be created for each base volume member. + - Views are created from images from a single point-in-time so once created they cannot be modified. + type: str + default: group + choices: + - group + - pit + - view + required: false + group_name: + description: + - Name of the snapshot consistency group or snapshot volume. + - Be sure to use different names for snapshot consistency groups and snapshot volumes to avoid name conflicts. + type: str + required: true + volumes: + description: + - Details for each consistency group base volume for defining reserve capacity, preferred reserve capacity storage pool, and snapshot volume options. + - When I(state==present and type==group) the volume entries will be used to add or remove base volume from a snapshot consistency group. + - When I(state==present and type==view) the volume entries will be used to select images from a point-in-time for their respective snapshot volumes. + - If I(state==present and type==view) and I(volume) is not specified then all volumes will be selected with the defaults. + - Views are created from images from a single point-in-time so once created they cannot be modified. + - When I(state==rollback) then I(volumes) can be used to specify which base volumes to rollback; otherwise all consistency group volumes will rollback. + type: list + required: false + suboptions: + volume: + description: + - Base volume for consistency group. + type: str + required: true + reserve_capacity_pct: + description: + - Percentage of base volume capacity to reserve for snapshot copy-on-writes (COW). + - Used to define reserve capacity for both snapshot consistency group volume members and snapshot volumes. + type: int + default: 40 + required: false + preferred_reserve_storage_pool: + description: + - Preferred storage pool or volume group for the reserve capacity volume. + - The base volume's storage pool or volume group will be selected by default if not defined. + - Used to specify storage pool or volume group for both snapshot consistency group volume members and snapshot volumes + type: str + required: false + snapshot_volume_writable: + description: + - Whether snapshot volume of base volume images should be writable. + type: bool + default: true + required: false + snapshot_volume_validate: + description: + - Whether snapshot volume should be validated which includes both a media scan and parity validation. 
+        type: bool
+        default: false
+        required: false
+      snapshot_volume_host:
+        description:
+          - Host or host group to map snapshot volume.
+        type: str
+        required: false
+  maximum_snapshots:
+    description:
+      - Total number of snapshot images to maintain.
+    type: int
+    default: 32
+    required: false
+  reserve_capacity_pct:
+    description:
+      - Default percentage of base volume capacity to reserve for snapshot copy-on-writes (COW).
+      - Used to define reserve capacity for both snapshot consistency group volume members and snapshot volumes.
+    type: int
+    default: 40
+    required: false
+  preferred_reserve_storage_pool:
+    description:
+      - Default preferred storage pool or volume group for the reserve capacity volume.
+      - The base volume's storage pool or volume group will be selected by default if not defined.
+      - Used to specify storage pool or volume group for both snapshot consistency group volume members and snapshot volumes.
+    type: str
+    required: false
+  alert_threshold_pct:
+    description:
+      - Percent of filled reserve capacity to issue alert.
+    type: int
+    default: 75
+    required: false
+  reserve_capacity_full_policy:
+    description:
+      - Policy for full reserve capacity.
+      - Purge deletes the oldest snapshot image for the base volume in the consistency group.
+      - Reject writes to base volume (keep snapshot images valid).
+    choices:
+      - purge
+      - reject
+    type: str
+    default: purge
+    required: false
+  rollback_priority:
+    description:
+      - Storage system priority given to restoring snapshot point in time.
+    type: str
+    choices:
+      - highest
+      - high
+      - medium
+      - low
+      - lowest
+    default: medium
+    required: false
+  rollback_backup:
+    description:
+      - Whether a point-in-time snapshot should be taken prior to performing a rollback.
+    type: bool
+    default: true
+    required: false
+  pit_name:
+    description:
+      - Name of a consistency group's snapshot images.
+    type: str
+    required: false
+  pit_description:
+    description:
+      - Arbitrary description for a consistency group's snapshot images.
+    type: str
+    required: false
+  pit_timestamp:
+    description:
+      - Snapshot image timestamp in the form YYYY-MM-DD HH:MM:SS (AM|PM) (hours, minutes, seconds, and day-period are optional).
+      - Define only as much time as necessary to distinguish the desired snapshot image from the others.
+      - 24 hour time will be assumed if the day-period indicator (AM, PM) is not specified.
+      - The terms newest and oldest may be used to select the newest and oldest consistency group images.
+      - Mutually exclusive with I(pit_name or pit_description).
+    type: str
+    required: false
+  view_name:
+    description:
+      - Consistency group snapshot volume group.
+      - Required when I(type==view) or when ensuring the view's absence when I(state==absent).
+    type: str
+    required: false
+  view_host:
+    description:
+      - Default host or host group to map snapshot volumes.
+    type: str
+    required: false
+  view_writable:
+    description:
+      - Default whether snapshot volumes should be writable.
+    type: bool
+    default: true
+    required: false
+  view_validate:
+    description:
+      - Default whether snapshot volumes should be validated.
+    type: bool
+    default: false
+    required: false
+notes:
+  - Key-value pairs are used to keep track of snapshot names and descriptions since the snapshot point-in-time images do not have metadata associated with
+    their data structures; therefore, it is necessary to clean out old keys that are no longer associated with an actual image. This cleaning action is
+    performed each time this module is executed.
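+  - I(pit_timestamp) comparisons only use as many fields as are supplied; for example, a hypothetical value of "2020-01-01 11" matches any image taken
+    during that hour of that day.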
+""" +EXAMPLES = """ +- name: Ensure snapshot consistency group exists. + na_santricity_snapshot: + ssid: "1" + api_url: https://192.168.1.100:8443/devmgr/v2 + api_username: admin + api_password: adminpass + state: present + type: group + group_name: snapshot_group1 + volumes: + - volume: vol1 + reserve_capacity_pct: 20 + preferred_reserve_storage_pool: vg1 + - volume: vol2 + reserve_capacity_pct: 30 + - volume: vol3 + alert_threshold_pct: 80 + maximum_snapshots: 30 +- name: Take the current consistency group's base volumes point-in-time snapshot images. + na_santricity_snapshot: + ssid: "1" + api_url: https://192.168.1.100:8443/devmgr/v2 + api_username: admin + api_password: adminpass + state: present + type: pit + group_name: snapshot_group1 + pit_name: pit1 + pit_description: Initial consistency group's point-in-time snapshot images. +- name: Ensure snapshot consistency group view exists and is mapped to host group. + na_santricity_snapshot: + ssid: "1" + api_url: https://192.168.1.100:8443/devmgr/v2 + api_username: admin + api_password: adminpass + state: present + type: view + group_name: snapshot_group1 + pit_name: pit1 + view_name: view1 + view_host: view1_hosts_group + volumes: + - volume: vol1 + reserve_capacity_pct: 20 + preferred_reserve_storage_pool: vg4 + snapshot_volume_writable: false + snapshot_volume_validate: true + - volume: vol2 + reserve_capacity_pct: 20 + preferred_reserve_storage_pool: vg4 + snapshot_volume_writable: true + snapshot_volume_validate: true + - volume: vol3 + reserve_capacity_pct: 20 + preferred_reserve_storage_pool: vg4 + snapshot_volume_writable: false + snapshot_volume_validate: true + alert_threshold_pct: 80 + maximum_snapshots: 30 +- name: Rollback base volumes to consistency group's point-in-time pit1. + na_santricity_snapshot: + ssid: "1" + api_url: https://192.168.1.100:8443/devmgr/v2 + api_username: admin + api_password: adminpass + state: present + type: group + group_name: snapshot_group1 + pit_name: pit1 + rollback: true + rollback_priority: high +- name: Ensure snapshot consistency group view no longer exists. + na_santricity_snapshot: + ssid: "1" + api_url: https://192.168.1.100:8443/devmgr/v2 + api_username: admin + api_password: adminpass + state: absent + type: view + group_name: snapshot_group1 + view_name: view1 +- name: Ensure that the consistency group's base volumes point-in-time snapshot images pit1 no longer exists. + na_santricity_snapshot: + ssid: "1" + api_url: https://192.168.1.100:8443/devmgr/v2 + api_username: admin + api_password: adminpass + state: absent + type: image + group_name: snapshot_group1 + pit_name: pit1 +- name: Ensure snapshot consistency group no longer exists. + na_santricity_snapshot: + ssid: "1" + api_url: https://192.168.1.100:8443/devmgr/v2 + api_username: admin + api_password: adminpass + state: absent + type: group + group_name: snapshot_group1 +""" +RETURN = """ +changed: + description: Whether changes have been made. + type: bool + returned: always +group_changes: + description: All changes performed to the consistency group. + type: dict + returned: always +deleted_metadata_keys: + description: Keys that were purged from the key-value datastore. 
+ type: list + returned: always +""" +from datetime import datetime +import re +from time import sleep + +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule + + +class NetAppESeriesSnapshot(NetAppESeriesModule): + def __init__(self): + ansible_options = dict(state=dict(type="str", default="present", choices=["absent", "present", "rollback"], required=False), + type=dict(type="str", default="group", choices=["group", "pit", "view"], required=False), + group_name=dict(type="str", required=True), + volumes=dict(type="list", required=False, + suboptions=dict(volume=dict(type="str", required=True), + reserve_capacity_pct=dict(type="int", default=40, required=False), + preferred_reserve_storage_pool=dict(type="str", required=False), + snapshot_volume_writable=dict(type="bool", default=True, required=False), + snapshot_volume_validate=dict(type="bool", default=False, required=False), + snapshot_volume_host=dict(type="str", default=None, required=False), + snapshot_volume_lun=dict(type="int", default=None, required=False))), + maximum_snapshots=dict(type="int", default=32, required=False), + reserve_capacity_pct=dict(type="int", default=40, required=False), + preferred_reserve_storage_pool=dict(type="str", required=False), + alert_threshold_pct=dict(type="int", default=75, required=False), + reserve_capacity_full_policy=dict(type="str", default="purge", choices=["purge", "reject"], required=False), + rollback_priority=dict(type="str", default="medium", choices=["highest", "high", "medium", "low", "lowest"], required=False), + rollback_backup=dict(type="bool", default=True, required=False), + pit_name=dict(type="str", required=False), + pit_description=dict(type="str", required=False), + pit_timestamp=dict(type="str", required=False), + view_name=dict(type="str", required=False), + view_host=dict(type="str", default=None, required=False), + view_writable=dict(type="bool", default=True, required=False), + view_validate=dict(type="bool", default=False, required=False)) + + super(NetAppESeriesSnapshot, self).__init__(ansible_options=ansible_options, + web_services_version="05.00.0000.0000", + supports_check_mode=True) + args = self.module.params + self.state = args["state"] + self.type = args["type"] + self.group_name = args["group_name"] + self.maximum_snapshots = args["maximum_snapshots"] + self.reserve_capacity_pct = args["reserve_capacity_pct"] + self.preferred_reserve_storage_pool = args["preferred_reserve_storage_pool"] + self.alert_threshold_pct = args["alert_threshold_pct"] + self.reserve_capacity_full_policy = "purgepit" if args["reserve_capacity_full_policy"] == "purge" else "failbasewrites" + self.rollback_priority = args["rollback_priority"] + self.rollback_backup = args["rollback_backup"] + self.rollback_priority = args["rollback_priority"] + self.pit_name = args["pit_name"] + self.pit_description = args["pit_description"] + self.view_name = args["view_name"] + self.view_host = args["view_host"] + self.view_writable = args["view_writable"] + self.view_validate = args["view_validate"] + + # Complete volume definitions. 
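+        # Each entry in volumes falls back to the module-level defaults when a field is omitted; e.g. with a
+        # hypothetical play setting reserve_capacity_pct: 40 at the top level, an entry such as
+        #   {volume: vol1, reserve_capacity_pct: 20} reserves 20% for vol1, while {volume: vol2} inherits 40%.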
+ self.volumes = {} + if args["volumes"]: + for volume_info in args["volumes"]: + reserve_capacity_pct = volume_info["reserve_capacity_pct"] if "reserve_capacity_pct" in volume_info else self.reserve_capacity_pct + snapshot_volume_writable = volume_info["snapshot_volume_writable"] if "snapshot_volume_writable" in volume_info else self.view_writable + snapshot_volume_validate = volume_info["snapshot_volume_validate"] if "snapshot_volume_validate" in volume_info else self.view_validate + snapshot_volume_host = volume_info["snapshot_volume_host"] if "snapshot_volume_host" in volume_info else self.view_host + snapshot_volume_lun = volume_info["snapshot_volume_lun"] if "snapshot_volume_lun" in volume_info else None + if "preferred_reserve_storage_pool" in volume_info and volume_info["preferred_reserve_storage_pool"]: + preferred_reserve_storage_pool = volume_info["preferred_reserve_storage_pool"] + else: + preferred_reserve_storage_pool = self.preferred_reserve_storage_pool + + self.volumes.update({volume_info["volume"]: {"reserve_capacity_pct": reserve_capacity_pct, + "preferred_reserve_storage_pool": preferred_reserve_storage_pool, + "snapshot_volume_writable": snapshot_volume_writable, + "snapshot_volume_validate": snapshot_volume_validate, + "snapshot_volume_host": snapshot_volume_host, + "snapshot_volume_lun": snapshot_volume_lun}}) + + # Check and convert pit_timestamp to datetime object. volume: snap-vol1 + self.pit_timestamp = None + self.pit_timestamp_tokens = 0 + if args["pit_timestamp"]: + if args["pit_timestamp"] in ["newest", "oldest"]: + self.pit_timestamp = args["pit_timestamp"] + elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} (AM|PM|am|pm)", args["pit_timestamp"]): + self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %I:%M:%S %p") + self.pit_timestamp_tokens = 6 + elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2} (AM|PM|am|pm)", args["pit_timestamp"]): + self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %I:%M %p") + self.pit_timestamp_tokens = 5 + elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2} (AM|PM|am|pm)", args["pit_timestamp"]): + self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %I %p") + self.pit_timestamp_tokens = 4 + elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}", args["pit_timestamp"]): + self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %H:%M:%S") + self.pit_timestamp_tokens = 6 + elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}", args["pit_timestamp"]): + self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %H:%M") + self.pit_timestamp_tokens = 5 + elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}", args["pit_timestamp"]): + self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %H") + self.pit_timestamp_tokens = 4 + elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2}", args["pit_timestamp"]): + self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d") + self.pit_timestamp_tokens = 3 + else: + self.module.fail_json(msg="Invalid argument! pit_timestamp must be in the form YYYY-MM-DD HH:MM:SS (AM|PM) (time portion is optional)." + " Array [%s]." % self.ssid) + + # Check for required arguments + if self.state == "present": + if self.type == "group": + if not self.volumes: + self.module.fail_json(msg="Missing argument! Volumes must be defined to create a snapshot consistency group." + " Group [%s]. 
Array [%s]" % (self.group_name, self.ssid)) + elif self.type == "pit": + if self.pit_timestamp and self.pit_name: + self.module.fail_json(msg="Invalid arguments! Either define pit_name with or without pit_description or pit_timestamp." + " Group [%s]. Array [%s]" % (self.group_name, self.ssid)) + + elif self.type == "view": + if not self.view_name: + self.module.fail_json(msg="Missing argument! view_name must be defined to create a snapshot consistency group view." + " Group [%s]. Array [%s]" % (self.group_name, self.ssid)) + if not (self.pit_name or self.pit_timestamp): + self.module.fail_json(msg="Missing argument! Either pit_name or pit_timestamp must be defined to create a consistency group point-in-time" + " snapshot. Group [%s]. Array [%s]" % (self.group_name, self.ssid)) + elif self.state == "rollback": + if not (self.pit_name or self.pit_timestamp): + self.module.fail_json(msg="Missing argument! Either pit_name or pit_timestamp must be defined to create a consistency group point-in-time" + " snapshot. Group [%s]. Array [%s]" % (self.group_name, self.ssid)) + else: + if self.type == "pit": + if self.pit_name and self.pit_timestamp: + self.module.fail_json(msg="Invalid arguments! Either define pit_name or pit_timestamp." + " Group [%s]. Array [%s]" % (self.group_name, self.ssid)) + if not (self.pit_name or self.pit_timestamp): + self.module.fail_json(msg="Missing argument! Either pit_name or pit_timestamp must be defined to create a consistency group point-in-time" + " snapshot. Group [%s]. Array [%s]" % (self.group_name, self.ssid)) + elif self.type == "view": + if not self.view_name: + self.module.fail_json(msg="Missing argument! view_name must be defined to create a snapshot consistency group view." + " Group [%s]. Array [%s]" % (self.group_name, self.ssid)) + + # Check whether request needs to be forwarded on to the controller web services rest api. + self.url_path_prefix = "" + if not self.is_embedded(): + if self.ssid == "0" or self.ssid.lower() == "proxy": + self.module.fail_json(msg="Snapshot is not a valid operation for SANtricity Web Services Proxy! ssid cannot be '0' or 'proxy'." + " Array [%s]" % self.ssid) + self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid + + self.cache = {"get_consistency_group": {}, + "get_all_storage_pools_by_id": {}, + "get_all_storage_pools_by_name": {}, + "get_all_volumes_by_id": {}, + "get_all_volumes_by_name": {}, + "get_all_hosts_and_hostgroups_by_name": {}, + "get_all_hosts_and_hostgroups_by_id": {}, + "get_mapping_by_id": {}, + "get_mapping_by_name": {}, + "get_all_concat_volumes_by_id": {}, + "get_pit_images_by_timestamp": {}, + "get_pit_images_by_name": {}, + "get_pit_images_metadata": {}, + "get_unused_pit_key_values": [], + "get_pit_info": None, + "get_consistency_group_view": {}, + "view_changes_required": []} + + def get_all_storage_pools_by_id(self): + """Retrieve and return all storage pools/volume groups.""" + if not self.cache["get_all_storage_pools_by_id"]: + try: + rc, storage_pools = self.request("storage-systems/%s/storage-pools" % self.ssid) + + for storage_pool in storage_pools: + self.cache["get_all_storage_pools_by_id"].update({storage_pool["id"]: storage_pool}) + self.cache["get_all_storage_pools_by_name"].update({storage_pool["name"]: storage_pool}) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve volumes! Error [%s]. Array [%s]." 
% (error, self.ssid)) + + return self.cache["get_all_storage_pools_by_id"] + + def get_all_storage_pools_by_name(self): + """Retrieve and return all storage pools/volume groups.""" + if not self.cache["get_all_storage_pools_by_name"]: + self.get_all_storage_pools_by_id() + + return self.cache["get_all_storage_pools_by_name"] + + def get_all_volumes_by_id(self): + """Retrieve and return a dictionary of all thick and thin volumes keyed by id.""" + if not self.cache["get_all_volumes_by_id"]: + try: + rc, thick_volumes = self.request("storage-systems/%s/volumes" % self.ssid) + rc, thin_volumes = self.request("storage-systems/%s/thin-volumes" % self.ssid) + + for volume in thick_volumes + thin_volumes: + self.cache["get_all_volumes_by_id"].update({volume["id"]: volume}) + self.cache["get_all_volumes_by_name"].update({volume["name"]: volume}) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve volumes! Error [%s]. Array [%s]." % (error, self.ssid)) + + return self.cache["get_all_volumes_by_id"] + + def get_all_volumes_by_name(self): + """Retrieve and return a dictionary of all thick and thin volumes keyed by name.""" + if not self.cache["get_all_volumes_by_name"]: + self.get_all_volumes_by_id() + + return self.cache["get_all_volumes_by_name"] + + def get_all_hosts_and_hostgroups_by_id(self): + """Retrieve and return a dictionary of all host and host groups keyed by name.""" + if not self.cache["get_all_hosts_and_hostgroups_by_id"]: + try: + rc, hostgroups = self.request("storage-systems/%s/host-groups" % self.ssid) + # hostgroup_by_id = {hostgroup["id"]: hostgroup for hostgroup in hostgroups} + hostgroup_by_id = dict((hostgroup["id"], hostgroup) for hostgroup in hostgroups) + + rc, hosts = self.request("storage-systems/%s/hosts" % self.ssid) + for host in hosts: + if host["clusterRef"] != "0000000000000000000000000000000000000000": + hostgroup_name = hostgroup_by_id[host["clusterRef"]]["name"] + + if host["clusterRef"] not in self.cache["get_all_hosts_and_hostgroups_by_id"].keys(): + hostgroup_by_id[host["clusterRef"]].update({"hostgroup": True, "host_ids": [host["id"]]}) + self.cache["get_all_hosts_and_hostgroups_by_id"].update({host["clusterRef"]: hostgroup_by_id[host["clusterRef"]]}) + self.cache["get_all_hosts_and_hostgroups_by_name"].update({hostgroup_name: hostgroup_by_id[host["clusterRef"]]}) + else: + self.cache["get_all_hosts_and_hostgroups_by_id"][host["clusterRef"]]["host_ids"].append(host["id"]) + self.cache["get_all_hosts_and_hostgroups_by_name"][hostgroup_name]["host_ids"].append(host["id"]) + + self.cache["get_all_hosts_and_hostgroups_by_id"].update({host["id"]: host, "hostgroup": False}) + self.cache["get_all_hosts_and_hostgroups_by_name"].update({host["name"]: host, "hostgroup": False}) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve all host and host group objects! Error [%s]. Array [%s]." 
% (error, self.ssid)) + + return self.cache["get_all_hosts_and_hostgroups_by_id"] + + def get_all_hosts_and_hostgroups_by_name(self): + """Retrieve and return a dictionary of all thick and thin volumes keyed by name.""" + if not self.cache["get_all_hosts_and_hostgroups_by_name"]: + self.get_all_hosts_and_hostgroups_by_id() + + return self.cache["get_all_hosts_and_hostgroups_by_name"] + + def get_mapping_by_id(self): + """Retrieve and return a dictionary of """ + if not self.cache["get_mapping_by_id"]: + existing_hosts_and_hostgroups_by_id = self.get_all_hosts_and_hostgroups_by_id() + existing_hosts_and_hostgroups_by_name = self.get_all_hosts_and_hostgroups_by_name() + try: + rc, mappings = self.request("storage-systems/%s/volume-mappings" % self.ssid) + + for mapping in mappings: + host_ids = [mapping["mapRef"]] + map_entry = {mapping["lun"]: mapping["volumeRef"]} + + if mapping["type"] == "cluster": + host_ids = existing_hosts_and_hostgroups_by_id[mapping["mapRef"]]["host_ids"] + if mapping["mapRef"] in self.cache["get_mapping_by_id"].keys(): + self.cache["get_mapping_by_id"][mapping["mapRef"]].update(map_entry) + self.cache["get_mapping_by_name"][mapping["mapRef"]].update(map_entry) + else: + self.cache["get_mapping_by_id"].update({mapping["mapRef"]: map_entry}) + self.cache["get_mapping_by_name"].update({mapping["mapRef"]: map_entry}) + + for host_id in host_ids: + if host_id in self.cache["get_mapping_by_id"].keys(): + self.cache["get_mapping_by_id"][mapping["mapRef"]].update(map_entry) + self.cache["get_mapping_by_name"][mapping["mapRef"]].update(map_entry) + else: + self.cache["get_mapping_by_id"].update({host_id: map_entry}) + self.cache["get_mapping_by_name"].update({host_id: map_entry}) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve all volume map definitions! Error [%s]. Array [%s]." % (error, self.ssid)) + + return self.cache["get_mapping_by_id"] + + def get_mapping_by_name(self): + """Retrieve and return a dictionary of """ + if not self.cache["get_mapping_by_name"]: + self.get_mapping_by_id() + + return self.cache["get_mapping_by_name"] + + def get_all_concat_volumes_by_id(self): + """Retrieve and return a dictionary of all thick and thin volumes keyed by id.""" + if not self.cache["get_all_concat_volumes_by_id"]: + try: + rc, concat_volumes = self.request("storage-systems/%s/repositories/concat" % self.ssid) + + for volume in concat_volumes: + self.cache["get_all_concat_volumes_by_id"].update({volume["id"]: volume}) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve reserve capacity volumes! Error [%s]. Array [%s]." 
% (error, self.ssid)) + + return self.cache["get_all_concat_volumes_by_id"] + + def get_consistency_group(self): + """Retrieve consistency groups and return information on the expected group.""" + existing_volumes = self.get_all_volumes_by_id() + + if not self.cache["get_consistency_group"]: + try: + rc, consistency_groups = self.request("storage-systems/%s/consistency-groups" % self.ssid) + + for consistency_group in consistency_groups: + if consistency_group["label"] == self.group_name: + rc, member_volumes = self.request("storage-systems/%s/consistency-groups/%s/member-volumes" % (self.ssid, consistency_group["id"])) + + self.cache["get_consistency_group"].update({"consistency_group_id": consistency_group["cgRef"], + "alert_threshold_pct": consistency_group["fullWarnThreshold"], + "maximum_snapshots": consistency_group["autoDeleteLimit"], + "rollback_priority": consistency_group["rollbackPriority"], + "reserve_capacity_full_policy": consistency_group["repFullPolicy"], + "sequence_numbers": consistency_group["uniqueSequenceNumber"], + "base_volumes": []}) + + for member_volume in member_volumes: + base_volume = existing_volumes[member_volume["volumeId"]] + base_volume_size_b = int(base_volume["totalSizeInBytes"]) + total_reserve_capacity_b = int(member_volume["totalRepositoryCapacity"]) + reserve_capacity_pct = int(round(float(total_reserve_capacity_b) / float(base_volume_size_b) * 100)) + + rc, concat = self.request("storage-systems/%s/repositories/concat/%s" % (self.ssid, member_volume["repositoryVolume"])) + + self.cache["get_consistency_group"]["base_volumes"].append({"name": base_volume["name"], + "id": base_volume["id"], + "base_volume_size_b": base_volume_size_b, + "total_reserve_capacity_b": total_reserve_capacity_b, + "reserve_capacity_pct": reserve_capacity_pct, + "repository_volume_info": concat}) + break + + except Exception as error: + self.module.fail_json(msg="Failed to retrieve snapshot consistency groups! Error [%s]. Array [%s]." % (error, self.ssid)) + + return self.cache["get_consistency_group"] + + def get_candidate(self, volume_name, volume_info): + """Return candidate for volume.""" + existing_storage_pools_by_id = self.get_all_storage_pools_by_id() + existing_storage_pools_by_name = self.get_all_storage_pools_by_name() + existing_volumes_by_name = self.get_all_volumes_by_name() + + if volume_name in existing_volumes_by_name: + base_volume_storage_pool_id = existing_volumes_by_name[volume_name]["volumeGroupRef"] + base_volume_storage_pool_name = existing_storage_pools_by_id[base_volume_storage_pool_id]["name"] + + preferred_reserve_storage_pool = base_volume_storage_pool_id + if volume_info["preferred_reserve_storage_pool"]: + if volume_info["preferred_reserve_storage_pool"] in existing_storage_pools_by_name: + preferred_reserve_storage_pool = existing_storage_pools_by_name[volume_info["preferred_reserve_storage_pool"]]["id"] + else: + self.module.fail_json(msg="Preferred storage pool or volume group does not exist! Storage pool [%s]. Group [%s]." + " Array [%s]." % (volume_info["preferred_reserve_storage_pool"], self.group_name, self.ssid)) + + volume_info.update({"name": volume_name, + "id": existing_volumes_by_name[volume_name]["id"], + "storage_pool_name": base_volume_storage_pool_name, + "storage_pool_id": base_volume_storage_pool_id, + "preferred_reserve_storage_pool": preferred_reserve_storage_pool, + "candidate": None}) + + else: + self.module.fail_json(msg="Volume does not exist! Volume [%s]. Group [%s]. Array [%s]." 
% (volume_name, self.group_name, self.ssid)) + + candidate_request = {"candidateRequest": {"baseVolumeRef": volume_info["id"], + "percentCapacity": volume_info["reserve_capacity_pct"], + "concatVolumeType": "snapshot"}} + try: + rc, candidates = self.request("storage-systems/%s/repositories/concat/single" % self.ssid, method="POST", data=candidate_request) + for candidate in candidates: + if candidate["volumeGroupId"] == volume_info["preferred_reserve_storage_pool"]: + volume_info["candidate"] = candidate + break + else: + self.module.fail_json(msg="Failed to retrieve capacity volume candidate in preferred storage pool or volume group!" + " Volume [%s]. Group [%s]. Array [%s]." % (volume_info["name"], self.group_name, self.ssid)) + except Exception as error: + self.module.fail_json(msg="Failed to get reserve capacity candidates!" + " Volumes %s. Group [%s]. Array [%s]. Error [%s]" % (volume_info["name"], self.group_name, self.ssid, error)) + + return volume_info + + def get_pit_images_metadata(self): + """Retrieve and return consistency group snapshot images' metadata keyed on timestamps.""" + if not self.cache["get_pit_images_metadata"]: + try: + rc, key_values = self.request(self.url_path_prefix + "key-values") + + for entry in key_values: + if re.search("ansible\\|%s\\|" % self.group_name, entry["key"]): + name = entry["key"].replace("ansible|%s|" % self.group_name, "") + values = entry["value"].split("|") + if len(values) == 3: + timestamp, image_id, description = values + self.cache["get_pit_images_metadata"].update({timestamp: {"name": name, "description": description}}) + + except Exception as error: + self.module.fail_json(msg="Failed to retrieve consistency group snapshot images metadata! Array [%s]. Error [%s]." % (self.ssid, error)) + + return self.cache["get_pit_images_metadata"] + + def get_pit_images_by_timestamp(self): + """Retrieve and return snapshot images.""" + if not self.cache["get_pit_images_by_timestamp"]: + group_id = self.get_consistency_group()["consistency_group_id"] + images_metadata = self.get_pit_images_metadata() + existing_volumes_by_id = self.get_all_volumes_by_id() + + try: + rc, images = self.request("storage-systems/%s/consistency-groups/%s/snapshots" % (self.ssid, group_id)) + for image_info in images: + + metadata = {"id": "", "name": "", "description": ""} + if image_info["pitTimestamp"] in images_metadata.keys(): + metadata = images_metadata[image_info["pitTimestamp"]] + + timestamp = datetime.fromtimestamp(int(image_info["pitTimestamp"])) + info = {"id": image_info["id"], + "name": metadata["name"], + "timestamp": timestamp, + "description": metadata["description"], + "sequence_number": image_info["pitSequenceNumber"], + "base_volume_id": image_info["baseVol"], + "base_volume_name": existing_volumes_by_id[image_info["baseVol"]]["name"], + "image_info": image_info} + + if timestamp not in self.cache["get_pit_images_by_timestamp"].keys(): + self.cache["get_pit_images_by_timestamp"].update({timestamp: {"sequence_number": image_info["pitSequenceNumber"], "images": [info]}}) + if metadata["name"]: + self.cache["get_pit_images_by_name"].update({metadata["name"]: {"sequence_number": image_info["pitSequenceNumber"], + "images": [info]}}) + else: + self.cache["get_pit_images_by_timestamp"][timestamp]["images"].append(info) + if metadata["name"]: + self.cache["get_pit_images_by_name"][metadata["name"]]["images"].append(info) + + except Exception as error: + self.module.fail_json(msg="Failed to retrieve consistency group snapshot images!" + " Group [%s]. 
Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+        return self.cache["get_pit_images_by_timestamp"]
+
+    def get_pit_images_by_name(self):
+        """Retrieve and return snapshot images."""
+        if not self.cache["get_pit_images_by_name"]:
+            self.get_pit_images_by_timestamp()
+
+        return self.cache["get_pit_images_by_name"]
+
+    def get_unused_pit_key(self):
+        """Determine all embedded pit key-values that do not match existing snapshot images."""
+        if not self.cache["get_unused_pit_key_values"]:
+            try:
+                rc, images = self.request("storage-systems/%s/snapshot-images" % self.ssid)
+                rc, key_values = self.request("key-values")
+
+                for key_value in key_values:
+                    key = key_value["key"]
+                    value = key_value["value"]
+                    if re.match("ansible\\|.*\\|.*", key):
+                        for image in images:
+                            if str(image["pitTimestamp"]) == value.split("|")[0]:
+                                break
+                        else:
+                            self.cache["get_unused_pit_key_values"].append(key)
+            except Exception as error:
+                self.module.warn("Failed to retrieve all snapshots to determine the key-value pairs that do not match a point-in-time snapshot image!"
+                                 " Array [%s]. Error [%s]." % (self.ssid, error))
+
+        return self.cache["get_unused_pit_key_values"]
+
+    def get_pit_info(self):
+        """Determine consistency group's snapshot images based on the provided arguments (pit_name or pit_timestamp)."""
+
+        def _check_timestamp(timestamp):
+            """Check whether timestamp matches I(pit_timestamp)"""
+            return (self.pit_timestamp.year == timestamp.year and
+                    self.pit_timestamp.month == timestamp.month and
+                    self.pit_timestamp.day == timestamp.day and
+                    (self.pit_timestamp_tokens < 4 or self.pit_timestamp.hour == timestamp.hour) and
+                    (self.pit_timestamp_tokens < 5 or self.pit_timestamp.minute == timestamp.minute) and
+                    (self.pit_timestamp_tokens < 6 or self.pit_timestamp.second == timestamp.second))
+
+        if self.cache["get_pit_info"] is None:
+            group = self.get_consistency_group()
+            pit_images_by_timestamp = self.get_pit_images_by_timestamp()
+            pit_images_by_name = self.get_pit_images_by_name()
+
+            if self.pit_name:
+                if self.pit_name in pit_images_by_name.keys():
+                    self.cache["get_pit_info"] = pit_images_by_name[self.pit_name]
+
+                    if self.pit_timestamp:
+                        for image in self.cache["get_pit_info"]["images"]:
+                            if not _check_timestamp(image["timestamp"]):
+                                self.module.fail_json(msg="Snapshot image does not exist that matches both name and supplied timestamp!"
+                                                          " Group [%s]. Image [%s]. Array [%s]." % (self.group_name, image, self.ssid))
+            elif self.pit_timestamp and pit_images_by_timestamp:
+                sequence_number = None
+                if self.pit_timestamp == "newest":
+                    sequence_number = group["sequence_numbers"][-1]
+
+                    for image_timestamp in pit_images_by_timestamp.keys():
+                        if int(pit_images_by_timestamp[image_timestamp]["sequence_number"]) == int(sequence_number):
+                            self.cache["get_pit_info"] = pit_images_by_timestamp[image_timestamp]
+                            break
+                elif self.pit_timestamp == "oldest":
+                    sequence_number = group["sequence_numbers"][0]
+                    for image_timestamp in pit_images_by_timestamp.keys():
+                        if int(pit_images_by_timestamp[image_timestamp]["sequence_number"]) == int(sequence_number):
+                            self.cache["get_pit_info"] = pit_images_by_timestamp[image_timestamp]
+                            break
+                else:
+                    for image_timestamp in pit_images_by_timestamp.keys():
+                        if _check_timestamp(image_timestamp):
+                            if sequence_number and sequence_number != pit_images_by_timestamp[image_timestamp]["sequence_number"]:
+                                self.module.fail_json(msg="Multiple snapshot images match the provided timestamp and do not have the same sequence number!"
+                                                          " Group [%s]. Array [%s]."
% (self.group_name, self.ssid)) + + sequence_number = pit_images_by_timestamp[image_timestamp]["sequence_number"] + self.cache["get_pit_info"] = pit_images_by_timestamp[image_timestamp] + + if self.state != "absent" and self.type != "pit" and self.cache["get_pit_info"] is None: + self.module.fail_json(msg="Snapshot consistency group point-in-time image does not exist! Name [%s]. Timestamp [%s]. Group [%s]." + " Array [%s]." % (self.pit_name, self.pit_timestamp, self.group_name, self.ssid)) + + return self.cache["get_pit_info"] + + def create_changes_required(self): + """Determine the required state changes for creating a new consistency group.""" + changes = {"create_group": {"name": self.group_name, + "alert_threshold_pct": self.alert_threshold_pct, + "maximum_snapshots": self.maximum_snapshots, + "reserve_capacity_full_policy": self.reserve_capacity_full_policy, + "rollback_priority": self.rollback_priority}, + "add_volumes": self.volumes} + + return changes + + def update_changes_required(self): + """Determine the required state changes for updating an existing consistency group.""" + group = self.get_consistency_group() + changes = {"update_group": {}, + "add_volumes": [], + "remove_volumes": [], + "expand_reserve_capacity": [], + "trim_reserve_capacity": []} + + # Check if consistency group settings need to be updated. + if group["alert_threshold_pct"] != self.alert_threshold_pct: + changes["update_group"].update({"alert_threshold_pct": self.alert_threshold_pct}) + if group["maximum_snapshots"] != self.maximum_snapshots: + changes["update_group"].update({"maximum_snapshots": self.maximum_snapshots}) + if group["rollback_priority"] != self.rollback_priority: + changes["update_group"].update({"rollback_priority": self.rollback_priority}) + if group["reserve_capacity_full_policy"] != self.reserve_capacity_full_policy: + changes["update_group"].update({"reserve_capacity_full_policy": self.reserve_capacity_full_policy}) + + # Check if base volumes need to be added or removed from consistency group. + # remaining_base_volumes = {base_volumes["name"]: base_volumes for base_volumes in group["base_volumes"]} # NOT python2.6 compatible + remaining_base_volumes = dict((base_volumes["name"], base_volumes) for base_volumes in group["base_volumes"]) + add_volumes = {} + expand_volumes = {} + + for volume_name, volume_info in self.volumes.items(): + reserve_capacity_pct = volume_info["reserve_capacity_pct"] + if volume_name in remaining_base_volumes: + + # Check if reserve capacity needs to be expanded or trimmed. + base_volume_reserve_capacity_pct = remaining_base_volumes[volume_name]["reserve_capacity_pct"] + if reserve_capacity_pct > base_volume_reserve_capacity_pct: + expand_reserve_capacity_pct = reserve_capacity_pct - base_volume_reserve_capacity_pct + expand_volumes.update({volume_name: {"reserve_capacity_pct": expand_reserve_capacity_pct, + "preferred_reserve_storage_pool": volume_info["preferred_reserve_storage_pool"], + "reserve_volume_id": remaining_base_volumes[volume_name]["repository_volume_info"]["id"]}}) + + elif reserve_capacity_pct < base_volume_reserve_capacity_pct: + existing_volumes_by_id = self.get_all_volumes_by_id() + existing_volumes_by_name = self.get_all_volumes_by_name() + existing_concat_volumes_by_id = self.get_all_concat_volumes_by_id() + trim_pct = base_volume_reserve_capacity_pct - reserve_capacity_pct + + # Check whether there are any snapshot images; if there are then throw an exception indicating that a trim operation + # cannot be done when snapshots exist. 
+                for timestamp, image_group in self.get_pit_images_by_timestamp().items():
+                    for image in image_group["images"]:
+                        if existing_volumes_by_id[image["base_volume_id"]]["name"] == volume_name:
+                            self.module.fail_json(msg="Reserve capacity cannot be trimmed when snapshot images exist for base volume!"
+                                                      " Base volume [%s]. Group [%s]. Array [%s]." % (volume_name, self.group_name, self.ssid))
+
+                # Collect the member volumes that must be removed, newest first, to meet or exceed the required trim percentage.
+                concat_volume_id = remaining_base_volumes[volume_name]["repository_volume_info"]["id"]
+                concat_volume_info = existing_concat_volumes_by_id[concat_volume_id]
+                base_volume_info = existing_volumes_by_name[volume_name]
+                base_volume_size_bytes = int(base_volume_info["totalSizeInBytes"])
+
+                total_member_volume_size_bytes = 0
+                member_volumes_to_trim = []
+                for trim_count, member_volume_id in enumerate(reversed(concat_volume_info["memberRefs"][1:])):
+                    member_volume_info = existing_volumes_by_id[member_volume_id]
+                    member_volumes_to_trim.append(member_volume_info)
+
+                    total_member_volume_size_bytes += int(member_volume_info["totalSizeInBytes"])
+                    total_trimmed_size_pct = round(total_member_volume_size_bytes / base_volume_size_bytes * 100)
+
+                    if total_trimmed_size_pct >= trim_pct:
+                        changes["trim_reserve_capacity"].append({"concat_volume_id": concat_volume_id, "trim_count": trim_count + 1})
+
+                        # Expand after trim if needed.
+                        if total_trimmed_size_pct > trim_pct:
+                            expand_reserve_capacity_pct = total_trimmed_size_pct - trim_pct
+                            expand_volumes.update({volume_name: {"reserve_capacity_pct": expand_reserve_capacity_pct,
+                                                                 "preferred_reserve_storage_pool": volume_info["preferred_reserve_storage_pool"],
+                                                                 "reserve_volume_id": remaining_base_volumes[volume_name]["repository_volume_info"]["id"]}})
+                        break
+                else:
+                    initial_reserve_volume_info = existing_volumes_by_id[concat_volume_info["memberRefs"][0]]
+                    minimum_capacity_pct = round(int(initial_reserve_volume_info["totalSizeInBytes"]) / base_volume_size_bytes * 100)
+                    self.module.fail_json(msg="Cannot delete initial reserve capacity volume! Minimum reserve capacity percent [%s]. Base volume [%s]."
+                                              " Group [%s]. Array [%s]." % (minimum_capacity_pct, volume_name, self.group_name, self.ssid))
+
+                remaining_base_volumes.pop(volume_name)
+            else:
+                add_volumes.update({volume_name: {"reserve_capacity_pct": reserve_capacity_pct,
+                                                  "preferred_reserve_storage_pool": volume_info["preferred_reserve_storage_pool"]}})
+
+        changes["add_volumes"] = add_volumes
+        changes["expand_reserve_capacity"] = expand_volumes
+        changes["remove_volumes"] = remaining_base_volumes
+        return changes
+
+    def get_consistency_group_view(self):
+        """Determine and return the consistency group view."""
+        group_id = self.get_consistency_group()["consistency_group_id"]
+
+        if not self.cache["get_consistency_group_view"]:
+            try:
+                rc, views = self.request("storage-systems/%s/consistency-groups/%s/views" % (self.ssid, group_id))
+
+                # Check for an existing view (collection of snapshot volumes for a consistency group) within the consistency group.
+                for view in views:
+                    if view["name"] == self.view_name:
+                        self.cache["get_consistency_group_view"] = view
+                        self.cache["get_consistency_group_view"].update({"snapshot_volumes": []})
+
+                        # Determine snapshot volumes associated with the view.
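+                        # Hypothetical sketch of the membership test applied in the try-block below: a snapshot
+                        # volume belongs to this view when its membership marks it as a view member and it
+                        # references the view's cgViewRef (field names follow the snapshot-volumes response).
+                        def _belongs_to_view_sketch(snapshot_volume, view):
+                            membership = snapshot_volume.get("membership")
+                            return bool(membership and
+                                        membership.get("viewType") == "member" and
+                                        membership.get("cgViewRef") == view["cgViewRef"])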
+ try: + rc, snapshot_volumes = self.request("storage-systems/%s/snapshot-volumes" % self.ssid) + + for snapshot_volume in snapshot_volumes: + if (snapshot_volume["membership"] and + snapshot_volume["membership"]["viewType"] == "member" and + snapshot_volume["membership"]["cgViewRef"] == view["cgViewRef"]): + self.cache["get_consistency_group_view"]["snapshot_volumes"].append(snapshot_volume) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve host mapping information!." + " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error)) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve consistency group's views!" + " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error)) + + return self.cache["get_consistency_group_view"] + + def create_view_changes_required(self): + """Determine whether snapshot consistency group point-in-time view needs to be created.""" + changes = {} + snapshot_images_info = self.get_pit_info() + changes.update({"name": self.view_name, + "sequence_number": snapshot_images_info["sequence_number"], + "images": snapshot_images_info["images"], + "volumes": self.volumes}) + + return changes + + def update_view_changes_required(self): + """Determine the changes required for snapshot consistency group point-in-time view.""" + changes = {"expand_reserve_capacity": [], + "trim_reserve_capacity": [], + "map_snapshot_volumes_mapping": [], + "unmap_snapshot_volumes_mapping": [], + "move_snapshot_volumes_mapping": [], + "update_snapshot_volumes_writable": []} + view = self.get_consistency_group_view() + host_objects_by_name = self.get_all_hosts_and_hostgroups_by_name() + host_objects_by_id = self.get_all_hosts_and_hostgroups_by_id() + existing_volumes_by_id = self.get_all_volumes_by_id() + if view: + if len(view["snapshot_volumes"]) != len(self.volumes): + self.module.fail_json(msg="Cannot add or remove snapshot volumes once view is created! Group [%s]. Array [%s]." % (self.group_name, self.ssid)) + + expand_volumes = {} + writable_volumes = {} + for snapshot_volume in view["snapshot_volumes"]: + for volume_name, volume_info in self.volumes.items(): + if existing_volumes_by_id[snapshot_volume["baseVol"]]["name"] == volume_name: + + # Check snapshot volume needs mapped to host or hostgroup. + if volume_info["snapshot_volume_host"] and not snapshot_volume["listOfMappings"]: + changes["map_snapshot_volumes_mapping"].append({"mappableObjectId": snapshot_volume["id"], + "lun": volume_info["snapshot_volume_lun"], + "targetId": host_objects_by_name[volume_info["snapshot_volume_host"]]["id"]}) + + # Check snapshot volume needs unmapped to host or hostgroup. 
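+                        # The if/elif chain here reduces to a three-way mapping decision; a condensed sketch
+                        # (illustrative names only, not module code):
+                        #
+                        #     def _mapping_action_sketch(desired_host, desired_lun, mapping):
+                        #         if desired_host and not mapping:
+                        #             return "map"      # handled above
+                        #         if not desired_host and mapping:
+                        #             return "unmap"    # handled below
+                        #         if mapping and (mapping["host"] != desired_host or mapping["lun"] != desired_lun):
+                        #             return "move"     # handled below
+                        #         return None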
+ elif not volume_info["snapshot_volume_host"] and snapshot_volume["listOfMappings"]: + changes["unmap_snapshot_volumes_mapping"].append({"snapshot_volume_name": snapshot_volume["name"], + "lun_mapping_reference": snapshot_volume["listOfMappings"][0]["lunMappingRef"]}) + + # Check host mapping needs moved + elif (snapshot_volume["listOfMappings"] and + ((volume_info["snapshot_volume_host"] != host_objects_by_id[snapshot_volume["listOfMappings"][0]["mapRef"]]["name"]) or + (volume_info["snapshot_volume_lun"] != snapshot_volume["listOfMappings"][0]["lun"]))): + changes["move_snapshot_volumes_mapping"].append({"lunMappingRef": snapshot_volume["listOfMappings"][0]["lunMappingRef"], + "lun": volume_info["snapshot_volume_lun"], + "mapRef": host_objects_by_name[volume_info["snapshot_volume_host"]]["id"]}) + # Check writable mode + if volume_info["snapshot_volume_writable"] != (snapshot_volume["accessMode"] == "readWrite"): + volume_info.update({"snapshot_volume_id": snapshot_volume["id"]}) + writable_volumes.update({volume_name: volume_info}) + + # Check reserve capacity. + if volume_info["snapshot_volume_writable"] and snapshot_volume["accessMode"] == "readWrite": + current_reserve_capacity_pct = int(round(float(snapshot_volume["repositoryCapacity"]) / + float(snapshot_volume["baseVolumeCapacity"]) * 100)) + if volume_info["reserve_capacity_pct"] > current_reserve_capacity_pct: + expand_reserve_capacity_pct = volume_info["reserve_capacity_pct"] - current_reserve_capacity_pct + expand_volumes.update({volume_name: {"reserve_capacity_pct": expand_reserve_capacity_pct, + "preferred_reserve_storage_pool": volume_info["preferred_reserve_storage_pool"], + "reserve_volume_id": snapshot_volume["repositoryVolume"]}}) + + elif volume_info["reserve_capacity_pct"] < current_reserve_capacity_pct: + existing_volumes_by_id = self.get_all_volumes_by_id() + existing_volumes_by_name = self.get_all_volumes_by_name() + existing_concat_volumes_by_id = self.get_all_concat_volumes_by_id() + trim_pct = current_reserve_capacity_pct - volume_info["reserve_capacity_pct"] + + # Collect information about all that needs to be trimmed to meet or exceed required trim percentage. + concat_volume_id = snapshot_volume["repositoryVolume"] + concat_volume_info = existing_concat_volumes_by_id[concat_volume_id] + base_volume_info = existing_volumes_by_name[volume_name] + base_volume_size_bytes = int(base_volume_info["totalSizeInBytes"]) + + total_member_volume_size_bytes = 0 + member_volumes_to_trim = [] + for trim_count, member_volume_id in enumerate(reversed(concat_volume_info["memberRefs"][1:])): + member_volume_info = existing_volumes_by_id[member_volume_id] + member_volumes_to_trim.append(member_volume_info) + + total_member_volume_size_bytes += int(member_volume_info["totalSizeInBytes"]) + total_trimmed_size_pct = round(total_member_volume_size_bytes / base_volume_size_bytes * 100) + + if total_trimmed_size_pct >= trim_pct: + changes["trim_reserve_capacity"].append({"concat_volume_id": concat_volume_id, "trim_count": trim_count + 1}) + + # Expand after trim if needed. 
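+                                        # Whole member volumes are removed, so the trimmed percentage can overshoot the
+                                        # requested reduction; the expansion below adds the overshoot back. Sketch with
+                                        # hypothetical numbers: a 15% trim over 10%-sized members trims 20%, then re-expands 5%.
+                                        def _overshoot_pct_sketch(total_trimmed_pct, requested_trim_pct):
+                                            return max(0, total_trimmed_pct - requested_trim_pct)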
+                                        if total_trimmed_size_pct > trim_pct:
+                                            expand_reserve_capacity_pct = total_trimmed_size_pct - trim_pct
+                                            expand_volumes.update({
+                                                volume_name: {"reserve_capacity_pct": expand_reserve_capacity_pct,
+                                                              "preferred_reserve_storage_pool": volume_info["preferred_reserve_storage_pool"],
+                                                              "reserve_volume_id": snapshot_volume["repositoryVolume"]}})
+                                        break
+                                    else:
+                                        initial_reserve_volume_info = existing_volumes_by_id[concat_volume_info["memberRefs"][0]]
+                                        minimum_capacity_pct = round(int(initial_reserve_volume_info["totalSizeInBytes"]) / base_volume_size_bytes * 100)
+                                        self.module.fail_json(msg="Cannot delete initial reserve capacity volume! Minimum reserve capacity percent [%s]. "
+                                                                  "Base volume [%s]. Group [%s]. Array [%s]." % (minimum_capacity_pct, volume_name,
+                                                                                                                 self.group_name, self.ssid))
+            changes.update({"expand_reserve_capacity": expand_volumes,
+                            "update_snapshot_volumes_writable": writable_volumes})
+        return changes
+
+    def rollback_changes_required(self):
+        """Determine the changes required for a snapshot consistency group point-in-time rollback."""
+        return self.get_pit_info()
+
+    def remove_snapshot_consistency_group(self, info):
+        """Remove an existing snapshot consistency group."""
+        try:
+            rc, resp = self.request("storage-systems/%s/consistency-groups/%s" % (self.ssid, info["consistency_group_id"]), method="DELETE")
+        except Exception as error:
+            self.module.fail_json(msg="Failed to remove snapshot consistency group! Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+    def create_snapshot_consistency_group(self, group_info):
+        """Create a new snapshot consistency group."""
+        consistency_group_request = {"name": self.group_name,
+                                     "fullWarnThresholdPercent": group_info["alert_threshold_pct"],
+                                     "autoDeleteThreshold": group_info["maximum_snapshots"],
+                                     "repositoryFullPolicy": group_info["reserve_capacity_full_policy"],
+                                     "rollbackPriority": group_info["rollback_priority"]}
+
+        try:
+            rc, group = self.request("storage-systems/%s/consistency-groups" % self.ssid, method="POST", data=consistency_group_request)
+            self.cache["get_consistency_group"].update({"consistency_group_id": group["cgRef"]})
+        except Exception as error:
+            self.module.fail_json(msg="Failed to create snapshot consistency group! Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+    def update_snapshot_consistency_group(self, group_info):
+        """Update an existing snapshot consistency group."""
+        group_id = self.get_consistency_group()["consistency_group_id"]
+        consistency_group_request = {"name": self.group_name}
+        if "alert_threshold_pct" in group_info.keys():
+            consistency_group_request.update({"fullWarnThresholdPercent": group_info["alert_threshold_pct"]})
+        if "maximum_snapshots" in group_info.keys():
+            consistency_group_request.update({"autoDeleteThreshold": group_info["maximum_snapshots"]})
+        if "reserve_capacity_full_policy" in group_info.keys():
+            consistency_group_request.update({"repositoryFullPolicy": group_info["reserve_capacity_full_policy"]})
+        if "rollback_priority" in group_info.keys():
+            consistency_group_request.update({"rollbackPriority": group_info["rollback_priority"]})
+
+        try:
+            rc, group = self.request("storage-systems/%s/consistency-groups/%s" % (self.ssid, group_id), method="POST", data=consistency_group_request)
+            return group["cgRef"]
+        except Exception as error:
+            self.module.fail_json(msg="Failed to update snapshot consistency group! Group [%s]. Array [%s]."
% (self.group_name, self.ssid)) + + def add_base_volumes(self, volumes): + """Add base volume(s) to the consistency group.""" + group_id = self.get_consistency_group()["consistency_group_id"] + member_volume_request = {"volumeToCandidates": {}} + + for volume_name, volume_info in volumes.items(): + candidate = self.get_candidate(volume_name, volume_info) + member_volume_request["volumeToCandidates"].update({volume_info["id"]: candidate["candidate"]["candidate"]}) + + try: + rc, resp = self.request("storage-systems/%s/consistency-groups/%s/member-volumes/batch" % (self.ssid, group_id), + method="POST", data=member_volume_request) + except Exception as error: + self.module.fail_json(msg="Failed to add reserve capacity volume! Base volumes %s. Group [%s]. Error [%s]." + " Array [%s]." % (", ".join([volume for volume in member_volume_request.keys()]), self.group_name, error, self.ssid)) + + def remove_base_volumes(self, volume_info_list): + """Add base volume(s) to the consistency group.""" + group_id = self.get_consistency_group()["consistency_group_id"] + + for name, info in volume_info_list.items(): + try: + rc, resp = self.request("storage-systems/%s/consistency-groups/%s/member-volumes/%s" % (self.ssid, group_id, info["id"]), method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to remove reserve capacity volume! Base volume [%s]. Group [%s]. Error [%s]. " + "Array [%s]." % (name, self.group_name, error, self.ssid)) + + def expand_reserve_capacities(self, reserve_volumes): + """Expand base volume(s) reserve capacity.""" + for volume_name, volume_info in reserve_volumes.items(): + candidate = self.get_candidate(volume_name, volume_info) + expand_request = {"repositoryRef": volume_info["reserve_volume_id"], + "expansionCandidate": candidate["candidate"]["candidate"]} + try: + rc, resp = self.request("/storage-systems/%s/repositories/concat/%s/expand" % (self.ssid, volume_info["reserve_volume_id"]), + method="POST", data=expand_request) + except Exception as error: + self.module.fail_json(msg="Failed to expand reserve capacity volume! Group [%s]. Error [%s]. Array [%s]." % (self.group_name, error, self.ssid)) + + def trim_reserve_capacities(self, trim_reserve_volume_info_list): + """trim base volume(s) reserve capacity.""" + for info in trim_reserve_volume_info_list: + trim_request = {"concatVol": info["concat_volume_id"], + "trimCount": info["trim_count"], + "retainRepositoryMembers": False} + try: + rc, trim = self.request("storage-systems/%s/symbol/trimConcatVolume?verboseErrorResponse=true" % self.ssid, method="POST", data=trim_request) + except Exception as error: + self.module.fail_json(msg="Failed to trim reserve capacity. Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error)) + + def create_pit_images(self): + """Generate snapshot image(s) for the base volumes in the consistency group.""" + group_id = self.get_consistency_group()["consistency_group_id"] + + try: + rc, images = self.request("storage-systems/%s/consistency-groups/%s/snapshots" % (self.ssid, group_id), method="POST") + + # Embedded web services should store the pit_image metadata since sending it to the proxy will be written to it instead. 
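+            # Sketch of the key-value scheme shared with get_pit_images_metadata: the key encodes the group and
+            # image name, the value packs timestamp, name, and description with "|" separators. Helper name and
+            # example values are illustrative only.
+            def _pit_metadata_sketch(group_name, pit_name, pit_timestamp, description):
+                key = "ansible|%s|%s" % (group_name, pit_name)
+                value = "%s|%s|%s" % (pit_timestamp, pit_name, description)
+                return key, value
+            # _pit_metadata_sketch("cg1", "nightly", "1585360552", "pre-upgrade")
+            #     -> ("ansible|cg1|nightly", "1585360552|nightly|pre-upgrade")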
+ if self.pit_name: + try: + rc, key_values = self.request(self.url_path_prefix + "key-values/ansible|%s|%s" % (self.group_name, self.pit_name), method="POST", + data="%s|%s|%s" % (images[0]["pitTimestamp"], self.pit_name, self.pit_description)) + except Exception as error: + self.module.fail_json(msg="Failed to create metadata for snapshot images!" + " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error)) + except Exception as error: + self.module.fail_json(msg="Failed to create consistency group snapshot images!" + " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error)) + + def remove_pit_images(self, pit_info): + """Remove selected snapshot point-in-time images.""" + group_id = self.get_consistency_group()["consistency_group_id"] + + pit_sequence_number = int(pit_info["sequence_number"]) + sequence_numbers = set(int(pit_image["sequence_number"]) for timestamp, pit_image in self.get_pit_images_by_timestamp().items() + if int(pit_image["sequence_number"]) < pit_sequence_number) + sequence_numbers.add(pit_sequence_number) + + for sequence_number in sorted(sequence_numbers): + + try: + rc, images = self.request("storage-systems/%s/consistency-groups/%s/snapshots/%s" % (self.ssid, group_id, sequence_number), method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to create consistency group snapshot images!" + " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error)) + + # Embedded web services should store the pit_image metadata since sending it to the proxy will be written to it instead. + if self.pit_name: + try: + rc, key_values = self.request(self.url_path_prefix + "key-values/ansible|%s|%s" % (self.group_name, self.pit_name), method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to delete metadata for snapshot images!" + " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error)) + + def cleanup_old_pit_metadata(self, keys): + """Delete unused point-in-time image metadata.""" + for key in keys: + try: + rc, images = self.request("key-values/%s" % key, method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to purge unused point-in-time image metadata! Key [%s]. Array [%s]." + " Error [%s]." % (key, self.ssid, error)) + + def create_view(self, view_info): + """Generate consistency group view.""" + group_id = self.get_consistency_group()["consistency_group_id"] + view_request = {"name": view_info["name"], + "pitSequenceNumber": view_info["sequence_number"], + "requests": []} + + for volume_name, volume_info in view_info["volumes"].items(): + candidate = None + if volume_info["snapshot_volume_writable"]: + candidate = self.get_candidate(volume_name, volume_info) + + for image in view_info["images"]: + if volume_name == image["base_volume_name"]: + view_request["requests"].append({"pitId": image["id"], + "candidate": candidate["candidate"]["candidate"] if candidate else None, + "accessMode": "readWrite" if volume_info["snapshot_volume_writable"] else "readOnly", + "scanMedia": volume_info["snapshot_volume_validate"], + "validateParity": volume_info["snapshot_volume_validate"]}) + break + else: + self.module.fail_json(msg="Base volume does not exist! Volume [%s]. Group [%s]. Array [%s]." 
% (volume_name, self.group_name, self.ssid)) + try: + rc, images = self.request("storage-systems/%s/consistency-groups/%s/views/batch" % (self.ssid, group_id), method="POST", data=view_request) + + # Determine snapshot volume mappings + view = self.get_consistency_group_view() + existing_volumes_by_id = self.get_all_volumes_by_id() + existing_hosts_by_name = self.get_all_hosts_and_hostgroups_by_name() + for volume_name, volume_info in self.volumes.items(): + if volume_info["snapshot_volume_host"]: + for snapshot_volume in view["snapshot_volumes"]: + if volume_name == existing_volumes_by_id[snapshot_volume["baseVol"]]["name"]: + snapshot_volume_map_request = {"mappableObjectId": snapshot_volume["id"], + "lun": volume_info["snapshot_volume_lun"], + "targetId": existing_hosts_by_name[volume_info["snapshot_volume_host"]]["id"]} + try: + rc, mapping = self.request("storage-systems/%s/volume-mappings" % self.ssid, method="POST", data=snapshot_volume_map_request) + except Exception as error: + self.module.fail_json(msg="Failed to map snapshot volume! Snapshot volume [%s]. View [%s]. Group [%s]. Array [%s]." + " Error [%s]" % (snapshot_volume["name"], self.view_name, self.group_name, self.ssid, error)) + break + except Exception as error: + self.module.fail_json(msg="Failed to create consistency group snapshot volumes!" + " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error)) + + def map_view(self, map_information_list): + """Map consistency group point-in-time snapshot volumes to host or host group.""" + existing_volumes = self.get_all_volumes_by_id() + existing_host_or_hostgroups = self.get_all_hosts_and_hostgroups_by_id() + for map_request in map_information_list: + try: + rc, mapping = self.request("storage-systems/%s/volume-mappings" % self.ssid, method="POST", data=map_request) + except Exception as error: + self.module.fail_json(msg="Failed to map snapshot volume! Snapshot volume [%s]. Target [%s]. Lun [%s]. Group [%s]. Array [%s]." + " Error [%s]." % (existing_volumes[map_request["mappableObjectId"]], + existing_host_or_hostgroups[map_request["targetId"]], + map_request["lun"], self.group_name, self.ssid, error)) + + def unmap_view(self, unmap_info_list): + """Unmap consistency group point-in-time snapshot volumes from host or host group.""" + for unmap_info in unmap_info_list: + try: + rc, unmap = self.request("storage-systems/%s/volume-mappings/%s" % (self.ssid, unmap_info["lun_mapping_reference"]), method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to unmap snapshot volume! Snapshot volume [%s]. View [%s]. Group [%s]. Array [%s]." + " Error [%s]." % (unmap_info["snapshot_volume_name"], self.view_name, self.group_name, self.ssid, error)) + + def move_view_mapping(self, map_information_list): + """Move consistency group point-in-time snapshot volumes to a different host or host group.""" + existing_volumes = self.get_all_volumes_by_id() + existing_host_or_hostgroups = self.get_all_hosts_and_hostgroups_by_id() + for map_request in map_information_list: + try: + rc, mapping = self.request("storage-systems/%s/symbol/moveLUNMapping?verboseErrorResponse=true" % self.ssid, method="POST", data=map_request) + except Exception as error: + self.module.fail_json(msg="Failed to move snapshot volume mapping! Snapshot volume [%s]. Target [%s]. Lun [%s]. Group [%s]. Array [%s]." + " Error [%s]." 
% (existing_volumes[map_request["mappableObjectId"]],
+                                              existing_host_or_hostgroups[map_request["targetId"]],
+                                              map_request["lun"], self.group_name, self.ssid, error))
+
+    def convert_view_to_writable(self, convert_view_information_list):
+        """Make consistency group point-in-time snapshot volumes writable."""
+        for volume_name, volume_info in convert_view_information_list.items():
+            candidate = self.get_candidate(volume_name, volume_info)
+            convert_request = {"fullThreshold": self.alert_threshold_pct,
+                               "repositoryCandidate": candidate["candidate"]["candidate"]}
+            try:
+                rc, convert = self.request("/storage-systems/%s/snapshot-volumes/%s/convertReadOnly" % (self.ssid, volume_info["snapshot_volume_id"]),
+                                           method="POST", data=convert_request)
+            except Exception as error:
+                self.module.fail_json(msg="Failed to convert snapshot volume to read/write! Snapshot volume [%s]. View [%s]. Group [%s]. Array [%s]."
+                                          " Error [%s]." % (volume_info["snapshot_volume_id"], self.view_name, self.group_name, self.ssid, error))
+
+    def remove_view(self, view_id):
+        """Remove a consistency group view."""
+        group_id = self.get_consistency_group()["consistency_group_id"]
+
+        try:
+            rc, resp = self.request("storage-systems/%s/consistency-groups/%s/views/%s" % (self.ssid, group_id, view_id), method="DELETE")
+        except Exception as error:
+            self.module.fail_json(msg="Failed to remove consistency group view!"
+                                      " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+    def rollback(self, rollback_info):
+        """Rollback consistency group base volumes to point-in-time snapshot images."""
+        group_info = self.get_consistency_group()
+        group_id = group_info["consistency_group_id"]
+
+        if self.rollback_backup:
+            self.create_pit_images()
+
+        # Ensure consistency group rollback priority is set correctly prior to rollback.
+        if self.rollback_priority:
+            try:
+                rc, resp = self.request("storage-systems/%s/consistency-groups/%s" % (self.ssid, group_id), method="POST",
+                                        data={"rollbackPriority": self.rollback_priority})
+            except Exception as error:
+                self.module.fail_json(msg="Failed to update consistency group rollback priority!"
+                                          " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
+
+        try:
+            rc, resp = self.request("storage-systems/%s/symbol/startPITRollback" % self.ssid, method="POST",
+                                    data={"pitRef": [image["id"] for image in rollback_info["images"]]})
+        except Exception as error:
+            self.module.fail_json(msg="Failed to initiate rollback operations!"
+                                      " Group [%s]. Array [%s]. Error [%s]."
% (self.group_name, self.ssid, error)) + + def complete_volume_definitions(self): + """Determine the complete self.volumes structure.""" + group = self.get_consistency_group() + + if not self.volumes: + for volume in group["base_volumes"]: + self.volumes.update({volume["name"]: {"reserve_capacity_pct": self.reserve_capacity_pct, + "preferred_reserve_storage_pool": self.preferred_reserve_storage_pool, + "snapshot_volume_writable": self.view_writable, + "snapshot_volume_validate": self.view_validate, + "snapshot_volume_host": self.view_host, + "snapshot_volume_lun": None}}) + + # Ensure a preferred_reserve_storage_pool has been selected + existing_storage_pools_by_id = self.get_all_storage_pools_by_id() + existing_storage_pools_by_name = self.get_all_storage_pools_by_name() + existing_volumes_by_name = self.get_all_volumes_by_name() + existing_volumes_by_id = self.get_all_volumes_by_id() + existing_mappings = self.get_mapping_by_id() + existing_host_and_hostgroup_by_id = self.get_all_hosts_and_hostgroups_by_id() + existing_host_and_hostgroup_by_name = self.get_all_hosts_and_hostgroups_by_name() + for volume_name, volume_info in self.volumes.items(): + base_volume_storage_pool_id = existing_volumes_by_name[volume_name]["volumeGroupRef"] + base_volume_storage_pool_name = existing_storage_pools_by_id[base_volume_storage_pool_id]["name"] + + # Check storage group information. + if not volume_info["preferred_reserve_storage_pool"]: + volume_info["preferred_reserve_storage_pool"] = base_volume_storage_pool_name + elif volume_info["preferred_reserve_storage_pool"] not in existing_storage_pools_by_name.keys(): + self.module.fail_json(msg="Preferred storage pool or volume group does not exist! Storage pool [%s]. Group [%s]." + " Array [%s]." % (volume_info["preferred_reserve_storage_pool"], self.group_name, self.ssid)) + + # Check host mapping information + if self.state == "present" and self.type == "view": + view_info = self.get_consistency_group_view() + + if volume_info["snapshot_volume_host"]: + if volume_info["snapshot_volume_host"] not in existing_host_and_hostgroup_by_name: + self.module.fail_json(msg="Specified host or host group does not exist! Host [%s]. Group [%s]." + " Array [%s]." % (volume_info["snapshot_volume_host"], self.group_name, self.ssid)) + + if not volume_info["snapshot_volume_lun"]: + if view_info: + for snapshot_volume in view_info["snapshot_volumes"]: + if snapshot_volume["listOfMappings"]: + mapping = snapshot_volume["listOfMappings"][0] + if (volume_name == existing_volumes_by_id[snapshot_volume["baseVol"]]["name"] and + volume_info["snapshot_volume_host"] == existing_host_and_hostgroup_by_id[mapping["mapRef"]]["name"]): + volume_info["snapshot_volume_lun"] = mapping["lun"] + break + else: + host_id = existing_host_and_hostgroup_by_name[volume_info["snapshot_volume_host"]]["id"] + for next_lun in range(1, 100): + + if host_id not in existing_mappings.keys(): + existing_mappings.update({host_id: {}}) + + if next_lun not in existing_mappings[host_id].keys(): + volume_info["snapshot_volume_lun"] = next_lun + existing_mappings[host_id].update({next_lun: None}) + break + + def apply(self): + """Apply any required snapshot state changes.""" + changes_required = False + group = self.get_consistency_group() + group_changes = {} + + # Determine which changes are required. + if group: + + # Determine whether changes are required. 
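+            # The branches below reduce to a (state, type) dispatch; a condensed, illustrative summary
+            # (descriptions only, not module code):
+            #
+            #     ("absent", "group")  -> remove the consistency group
+            #     ("absent", "pit")    -> get_pit_info() selects the images to delete
+            #     ("absent", "view")   -> get_consistency_group_view() selects the view to delete
+            #     ("present", "group") -> update_changes_required()
+            #     ("present", "pit")   -> always create a new point-in-time image
+            #     ("present", "view")  -> update_view_changes_required() or create_view_changes_required()
+            #     ("rollback", any)    -> rollback_changes_required()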
+ if self.state == "absent": + if self.type == "group": + if self.group_name: + changes_required = True + elif self.type == "pit": + group_changes = self.get_pit_info() + if group_changes: + changes_required = True + elif self.type == "view": + group_changes = self.get_consistency_group_view() + if group_changes: + changes_required = True + + elif self.state == "present": + self.complete_volume_definitions() + + if self.type == "group": + group_changes = self.update_changes_required() + if (group_changes["update_group"] or + group_changes["add_volumes"] or + group_changes["remove_volumes"] or + group_changes["expand_reserve_capacity"] or + group_changes["trim_reserve_capacity"]): + changes_required = True + + elif self.type == "pit": + changes_required = True + + elif self.type == "view": + if self.get_consistency_group_view(): + group_changes = self.update_view_changes_required() + if (group_changes["expand_reserve_capacity"] or + group_changes["trim_reserve_capacity"] or + group_changes["map_snapshot_volumes_mapping"] or + group_changes["unmap_snapshot_volumes_mapping"] or + group_changes["move_snapshot_volumes_mapping"] or + group_changes["update_snapshot_volumes_writable"]): + changes_required = True + else: + group_changes = self.create_view_changes_required() + changes_required = True + + elif self.state == "rollback": + self.complete_volume_definitions() + if not self.volumes: + for volume in group["base_volumes"]: + self.volumes.update({volume["name"]: None}) + group_changes = self.rollback_changes_required() + if group_changes: + changes_required = True + + else: + if self.state == "present": + if self.type == "group": + self.complete_volume_definitions() + group_changes = self.create_changes_required() + changes_required = True + elif self.type == "pit": + self.module.fail_json(msg="Snapshot point-in-time images cannot be taken when the snapshot consistency group does not exist!" + " Group [%s]. Array [%s]." % (self.group_name, self.ssid)) + elif self.type == "view": + self.module.fail_json(msg="Snapshot view cannot be created when the snapshot consistency group does not exist!" + " Group [%s]. Array [%s]." % (self.group_name, self.ssid)) + elif self.state == "rollback": + self.module.fail_json(msg="Rollback operation is not available when the snapshot consistency group does not exist!" + " Group [%s]. Array [%s]." % (self.group_name, self.ssid)) + + # Determine if they're any key-value pairs that need to be cleaned up since snapshot pit images were deleted outside of this module. + unused_pit_keys = self.get_unused_pit_key() + + # Apply any required changes. 
+        if (changes_required or unused_pit_keys) and not self.module.check_mode:
+            if group:
+                if self.state == "absent":
+                    if self.type == "group":
+                        self.remove_snapshot_consistency_group(group)
+                    elif self.type == "pit":
+                        self.remove_pit_images(group_changes)
+                    elif self.type == "view":
+                        self.remove_view(group_changes["id"])
+
+                elif self.state == "present":
+
+                    if self.type == "group":
+                        if group_changes["update_group"]:
+                            self.update_snapshot_consistency_group(group_changes["update_group"])
+                        if group_changes["add_volumes"]:
+                            self.add_base_volumes(group_changes["add_volumes"])
+                        if group_changes["remove_volumes"]:
+                            self.remove_base_volumes(group_changes["remove_volumes"])
+                        if group_changes["trim_reserve_capacity"]:
+                            self.trim_reserve_capacities(group_changes["trim_reserve_capacity"])
+                            if group_changes["expand_reserve_capacity"]:
+                                sleep(15)
+                        if group_changes["expand_reserve_capacity"]:
+                            self.expand_reserve_capacities(group_changes["expand_reserve_capacity"])
+
+                    elif self.type == "pit":
+                        self.create_pit_images()
+
+                    elif self.type == "view":
+                        view = self.get_consistency_group_view()
+                        if view:
+                            if group_changes["trim_reserve_capacity"]:
+                                self.trim_reserve_capacities(group_changes["trim_reserve_capacity"])
+                                if group_changes["expand_reserve_capacity"]:
+                                    sleep(15)
+                            if group_changes["expand_reserve_capacity"]:
+                                self.expand_reserve_capacities(group_changes["expand_reserve_capacity"])
+                            if group_changes["map_snapshot_volumes_mapping"]:
+                                self.map_view(group_changes["map_snapshot_volumes_mapping"])
+                            if group_changes["unmap_snapshot_volumes_mapping"]:
+                                self.unmap_view(group_changes["unmap_snapshot_volumes_mapping"])
+                            if group_changes["move_snapshot_volumes_mapping"]:
+                                self.move_view_mapping(group_changes["move_snapshot_volumes_mapping"])
+                            if group_changes["update_snapshot_volumes_writable"]:
+                                self.convert_view_to_writable(group_changes["update_snapshot_volumes_writable"])
+                        else:
+                            self.create_view(group_changes)
+
+                elif self.state == "rollback":
+                    self.rollback(group_changes)
+
+            elif self.type == "group":
+                self.create_snapshot_consistency_group(group_changes["create_group"])
+                self.add_base_volumes(group_changes["add_volumes"])
+
+            if unused_pit_keys:
+                self.cleanup_old_pit_metadata(unused_pit_keys)
+
+        self.module.exit_json(changed=changes_required, group_changes=group_changes, deleted_metadata_keys=unused_pit_keys)
+
+
+def main():
+    snapshot = NetAppESeriesSnapshot()
+    snapshot.apply()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_storagepool.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_storagepool.py
new file mode 100644
index 000000000..daf2308d7
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_storagepool.py
@@ -0,0 +1,1057 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_storagepool
+short_description: NetApp E-Series manage volume groups and disk pools
+description: Create or remove volume groups and disk pools for NetApp E-series storage arrays.
+author:
+    - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.santricity_doc
+options:
+    state:
+        description:
+            - Whether the specified storage pool should exist or not.
+ - Note that removing a storage pool currently requires the removal of all defined volumes first. + type: str + choices: ["present", "absent"] + default: "present" + name: + description: + - The name of the storage pool to manage + type: str + required: true + criteria_drive_count: + description: + - The number of disks to use for building the storage pool. + - When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified. + - The pool will be expanded if this number exceeds the number of disks already in place (See expansion note below) + type: int + required: false + criteria_min_usable_capacity: + description: + - The minimum size of the storage pool (in size_unit). + - When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified. + - The pool will be expanded if this value exceeds its current size. (See expansion note below) + - Do not use when the storage system contains mixed drives and I(usable_drives) is specified since usable capacities may not be accurate. + type: float + required: false + criteria_drive_type: + description: + - The type of disk (hdd or ssd) to use when searching for candidates to use. + - When not specified each drive type will be evaluated until successful drive candidates are found starting with + the most prevalent drive type. + type: str + choices: ["hdd","ssd"] + required: false + criteria_size_unit: + description: + - The unit used to interpret size parameters + type: str + choices: ["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"] + default: "gb" + required: false + criteria_drive_min_size: + description: + - The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool. + type: float + required: false + criteria_drive_max_size: + description: + - The maximum individual drive size (in size_unit) to consider when choosing drives for the storage pool. + type: float + required: false + criteria_drive_interface_type: + description: + - The interface type to use when selecting drives for the storage pool + - If not provided then all interface types will be considered. + type: str + choices: ["scsi", "fibre", "sata", "pata", "fibre520b", "sas", "sas4k", "nvme4k"] + required: false + criteria_drive_require_da: + description: + - Ensures the storage pool will be created with only data assurance (DA) capable drives. + - Only available for new storage pools; existing storage pools cannot be converted. + type: bool + default: false + required: false + criteria_drive_require_fde: + description: + - Whether full disk encryption ability is required for drives to be added to the storage pool + type: bool + default: false + required: false + usable_drives: + description: + - Ordered comma-separated list of tray/drive slots to be selected for drive candidates (drives that are used will be skipped). + - Each drive entry is represented as <tray_number>:<(optional) drawer_number>:<drive_slot_number> (e.g. 99:0 is the base tray's drive slot 0). + - The base tray's default identifier is 99 and expansion trays are added in the order they are attached but these identifiers can be changed by the user. + - Be aware that trays with multiple drawers still have a dedicated drive slot for all drives and the slot number does not rely on the drawer; however, + if you're planing to have drawer protection you need to order accordingly. + - When I(usable_drives) are not provided then the drive candidates will be selected by the storage system. 
+ type: str + required: false + raid_level: + description: + - The RAID level of the storage pool to be created. + - Required only when I(state=="present"). + - When I(raid_level=="raidDiskPool") then I(criteria_drive_count >= 10 or criteria_drive_count >= 11) is required + depending on the storage array specifications. + - When I(raid_level=="raid0") then I(1<=criteria_drive_count) is required. + - When I(raid_level=="raid1") then I(2<=criteria_drive_count) is required. + - When I(raid_level=="raid3") then I(3<=criteria_drive_count<=30) is required. + - When I(raid_level=="raid5") then I(3<=criteria_drive_count<=30) is required. + - When I(raid_level=="raid6") then I(5<=criteria_drive_count<=30) is required. + - Note that raidAll will be treated as raidDiskPool and raid3 as raid5. + type: str + default: "raidDiskPool" + choices: ["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"] + required: false + secure_pool: + description: + - Enables security at rest feature on the storage pool. + - Will only work if all drives in the pool are security capable (FDE, FIPS, or mix) + - Warning, once security is enabled it is impossible to disable without erasing the drives. + type: bool + required: false + reserve_drive_count: + description: + - Set the number of drives reserved by the storage pool for reconstruction operations. + - Only valid on raid disk pools. + type: int + required: false + remove_volumes: + description: + - Prior to removing a storage pool, delete all volumes in the pool. + type: bool + default: true + required: false + erase_secured_drives: + description: + - If I(state=="absent") then all storage pool drives will be erase + - If I(state=="present") then delete all available storage array drives that have security enabled. + type: bool + default: true + required: false + ddp_critical_threshold_pct: + description: + - Issues a critical alert when threshold of storage has been allocated. + - Only applicable when I(raid_level=="raidDiskPool"). + - Set I(ddp_critical_threshold_pct==0) to disable alert. + type: int + default: 85 + required: false + ddp_warning_threshold_pct: + description: + - Issues a warning alert when threshold of storage has been allocated. + - Only applicable when I(raid_level=="raidDiskPool"). + - Set I(ddp_warning_threshold_pct==0) to disable alert. + type: int + default: 85 + required: false +notes: + - The expansion operations are non-blocking due to the time consuming nature of expanding volume groups + - Traditional volume groups (raid0, raid1, raid5, raid6) are performed in steps dictated by the storage array. Each + required step will be attempted until the request fails which is likely because of the required expansion time. + - raidUnsupported will be treated as raid0, raidAll as raidDiskPool and raid3 as raid5. + - Tray loss protection and drawer loss protection will be chosen if at all possible. +""" +EXAMPLES = """ +- name: No disk groups + na_santricity_storagepool: + ssid: "{{ ssid }}" + name: "{{ item }}" + state: absent + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" +""" +RETURN = """ +msg: + description: Success message + returned: success + type: str + sample: Json facts for the pool that was created. 
+""" +import functools +from itertools import groupby +from time import sleep + +from pprint import pformat +from ansible.module_utils._text import to_native +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule + + +def get_most_common_elements(iterator): + """Returns a generator containing a descending list of most common elements.""" + if not isinstance(iterator, list): + raise TypeError("iterator must be a list.") + + grouped = [(key, len(list(group))) for key, group in groupby(sorted(iterator))] + return sorted(grouped, key=lambda x: x[1], reverse=True) + + +def memoize(func): + """Generic memoizer for any function with any number of arguments including zero.""" + + @functools.wraps(func) + def wrapper(*args, **kwargs): + class MemoizeFuncArgs(dict): + def __missing__(self, _key): + self[_key] = func(*args, **kwargs) + return self[_key] + + key = str((args, kwargs)) if args and kwargs else "no_argument_response" + return MemoizeFuncArgs().__getitem__(key) + + return wrapper + + +class NetAppESeriesStoragePool(NetAppESeriesModule): + EXPANSION_TIMEOUT_SEC = 10 + DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11 + + def __init__(self): + version = "02.00.0000.0000" + ansible_options = dict( + state=dict(choices=["present", "absent"], default="present", type="str"), + name=dict(required=True, type="str"), + criteria_size_unit=dict(choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"], + default="gb", type="str"), + criteria_drive_count=dict(type="int"), + criteria_drive_interface_type=dict(choices=["scsi", "fibre", "sata", "pata", "fibre520b", "sas", "sas4k", "nvme4k"], type="str"), + criteria_drive_type=dict(choices=["ssd", "hdd"], type="str", required=False), + criteria_drive_min_size=dict(type="float"), + criteria_drive_max_size=dict(type="float"), + criteria_drive_require_da=dict(type="bool", required=False), + criteria_drive_require_fde=dict(type="bool", required=False), + criteria_min_usable_capacity=dict(type="float"), + usable_drives=dict(type="str", required=False), + raid_level=dict(choices=["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"], + default="raidDiskPool"), + erase_secured_drives=dict(type="bool", default=True), + secure_pool=dict(type="bool", default=False), + reserve_drive_count=dict(type="int"), + remove_volumes=dict(type="bool", default=True), + ddp_critical_threshold_pct=dict(type="int", default=85, required=False), + ddp_warning_threshold_pct=dict(type="int", default=0, required=False)) + + required_if = [["state", "present", ["raid_level"]]] + super(NetAppESeriesStoragePool, self).__init__(ansible_options=ansible_options, + web_services_version=version, + supports_check_mode=True, + required_if=required_if) + + args = self.module.params + self.state = args["state"] + self.ssid = args["ssid"] + self.name = args["name"] + self.criteria_drive_count = args["criteria_drive_count"] + self.criteria_min_usable_capacity = args["criteria_min_usable_capacity"] + self.criteria_size_unit = args["criteria_size_unit"] + self.criteria_drive_min_size = args["criteria_drive_min_size"] + self.criteria_drive_max_size = args["criteria_drive_max_size"] + self.criteria_drive_type = args["criteria_drive_type"] + self.criteria_drive_interface_type = args["criteria_drive_interface_type"] + self.criteria_drive_require_fde = args["criteria_drive_require_fde"] + self.criteria_drive_require_da = args["criteria_drive_require_da"] + self.raid_level = args["raid_level"] + self.erase_secured_drives = 
args["erase_secured_drives"] + self.secure_pool = args["secure_pool"] + self.reserve_drive_count = args["reserve_drive_count"] + self.remove_volumes = args["remove_volumes"] + self.ddp_critical_threshold_pct = args["ddp_critical_threshold_pct"] + self.ddp_warning_threshold_pct = args["ddp_warning_threshold_pct"] + self.pool_detail = None + + if self.ddp_critical_threshold_pct < 0 or self.ddp_critical_threshold_pct > 100: + self.module.fail_json(msg="Invalid I(ddp_critical_threshold_pct) value! Must between or equal to 0 and 100. Array [%s]" % self.ssid) + if self.ddp_warning_threshold_pct < 0 or self.ddp_warning_threshold_pct > 100: + self.module.fail_json(msg="Invalid I(ddp_warning_threshold_pct) value! Must between or equal to 0 and 100. Array [%s]" % self.ssid) + + # Change all sizes to be measured in bytes + if self.criteria_min_usable_capacity: + self.criteria_min_usable_capacity = int(self.criteria_min_usable_capacity * self.SIZE_UNIT_MAP[self.criteria_size_unit]) + if self.criteria_drive_min_size: + self.criteria_drive_min_size = int(self.criteria_drive_min_size * self.SIZE_UNIT_MAP[self.criteria_size_unit]) + if self.criteria_drive_max_size: + self.criteria_drive_max_size = int(self.criteria_drive_max_size * self.SIZE_UNIT_MAP[self.criteria_size_unit]) + self.criteria_size_unit = "bytes" + + # Adjust unused raid level option to reflect documentation + if self.raid_level == "raidAll": + self.raid_level = "raidDiskPool" + if self.raid_level == "raid3": + self.raid_level = "raid5" + + # Parse usable drive string into tray:slot list + self.usable_drives = [] + if args["usable_drives"]: + for usable_drive in args["usable_drives"].split(","): + location = [int(item) for item in usable_drive.split(":")] + if len(location) == 2: + tray, slot = location + self.usable_drives.append([tray, 0, slot + 1]) # slot must be one-indexed instead of zero. + elif len(location) == 3: + tray, drawer, slot = location + self.usable_drives.append([tray, drawer - 1, slot + 1]) # slot must be one-indexed instead of zero. + else: + self.module.fail_json(msg="Invalid I(usable_drives) value! Must be a comma-separated list of <TRAY_NUMBER>:<DRIVE_SLOT_NUMBER> entries." + " Array [%s]." 
% self.ssid) + + @property + @memoize + def available_drives(self): + """Determine the list of available drives""" + return [drive["id"] for drive in self.drives if drive["available"] and drive["status"] == "optimal"] + + @property + @memoize + def available_drive_types(self): + """Determine the types of available drives sorted by the most common first.""" + types = [drive["driveMediaType"] for drive in self.drives] + return [entry[0] for entry in get_most_common_elements(types)] + + @property + @memoize + def available_drive_interface_types(self): + """Determine the types of available drives.""" + interfaces = [drive["phyDriveType"] for drive in self.drives] + return [entry[0] for entry in get_most_common_elements(interfaces)] + + @property + def storage_pool_drives(self): + """Retrieve list of drives found in storage pool.""" + return [drive for drive in self.drives if drive["currentVolumeGroupRef"] == self.pool_detail["id"] and not drive["hotSpare"]] + + @property + def expandable_drive_count(self): + """Maximum number of drives that a storage pool can be expended at a given time.""" + capabilities = None + if self.raid_level == "raidDiskPool": + return len(self.available_drives) + + try: + rc, capabilities = self.request("storage-systems/%s/capabilities" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to fetch maximum expandable drive count. Array id [%s]. Error [%s]." + % (self.ssid, to_native(error))) + + return capabilities["featureParameters"]["maxDCEDrives"] + + @property + def disk_pool_drive_minimum(self): + """Provide the storage array's minimum disk pool drive count.""" + rc, attr = self.request("storage-systems/%s/symbol/getSystemAttributeDefaults" % self.ssid, ignore_errors=True) + + # Standard minimum is 11 drives but some allow 10 drives. 10 will be the default + if (rc != 200 or "minimumDriveCount" not in attr["defaults"]["diskPoolDefaultAttributes"].keys() or + attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"] == 0): + return self.DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT + + return attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"] + + def get_available_drive_capacities(self, drive_id_list=None): + """Determine the list of available drive capacities.""" + if drive_id_list: + available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives + if drive["id"] in drive_id_list and drive["available"] and + drive["status"] == "optimal"]) + else: + available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives + if drive["available"] and drive["status"] == "optimal"]) + + self.module.log("available drive capacities: %s" % available_drive_capacities) + return list(available_drive_capacities) + + @property + def drives(self): + """Retrieve list of drives found in storage system.""" + drives = None + try: + rc, drives = self.request("storage-systems/%s/drives" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to fetch disk drives. Array id [%s]. Error [%s]." 
% (self.ssid, to_native(error))) + + return drives + + def tray_by_ids(self): + """Retrieve list of trays found in storage system and return dictionary of trays keyed by ids.""" + tray_by_ids = {} + try: + rc, inventory = self.request("storage-systems/%s/hardware-inventory" % self.ssid) + for tray in inventory["trays"]: + tray_by_ids.update({tray["trayRef"]: {"tray_number": tray["trayId"], + "drawer_count": tray["driveLayout"]["numRows"] * tray["driveLayout"]["numColumns"]}}) + except Exception as error: + self.module.fail_json(msg="Failed to fetch trays. Array id [%s]. Error [%s]." % (self.ssid, to_native(error))) + + return tray_by_ids + + def convert_drives_list_into_drive_info_by_ids(self): + """Determine drive identifiers base on provided drive list. Provide usable_ids list to select subset.""" + tray_by_ids = self.tray_by_ids() + + drives = [] + for usable_drive in self.usable_drives: + tray, drawer, slot = usable_drive + for drive in self.drives: + drawer_slot = drawer * tray_by_ids[drive["physicalLocation"]["trayRef"]]["drawer_count"] + slot + if drawer_slot == drive["physicalLocation"]["slot"] and tray == tray_by_ids[drive["physicalLocation"]["trayRef"]]["tray_number"]: + if drive["available"]: + drives.append(drive["id"]) + break + + return drives + + def is_drive_count_valid(self, drive_count): + """Validate drive count criteria is met.""" + if self.criteria_drive_count and drive_count < self.criteria_drive_count: + return False + + if self.raid_level == "raidDiskPool": + return drive_count >= self.disk_pool_drive_minimum + if self.raid_level == "raid0": + return drive_count > 0 + if self.raid_level == "raid1": + return drive_count >= 2 and (drive_count % 2) == 0 + if self.raid_level in ["raid3", "raid5"]: + return 3 <= drive_count <= 30 + if self.raid_level == "raid6": + return 5 <= drive_count <= 30 + return False + + @property + def storage_pool(self): + """Retrieve storage pool information.""" + storage_pools_resp = None + try: + rc, storage_pools_resp = self.request("storage-systems/%s/storage-pools" % self.ssid) + except Exception as err: + self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error [%s]. State[%s]." + % (self.ssid, to_native(err), self.state)) + + pool_detail = [pool for pool in storage_pools_resp if pool["name"] == self.name] + return pool_detail[0] if pool_detail else dict() + + @property + def storage_pool_volumes(self): + """Retrieve list of volumes associated with storage pool.""" + volumes_resp = None + try: + rc, volumes_resp = self.request("storage-systems/%s/volumes" % self.ssid) + except Exception as err: + self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error [%s]. State[%s]." 
+ % (self.ssid, to_native(err), self.state)) + + group_ref = self.storage_pool["volumeGroupRef"] + storage_pool_volume_list = [volume["id"] for volume in volumes_resp if volume["volumeGroupRef"] == group_ref] + return storage_pool_volume_list + + def get_ddp_capacity(self, expansion_drive_list): + """Return the total usable capacity based on the additional drives.""" + + def get_ddp_error_percent(_drive_count, _extent_count): + """Determine the space reserved for reconstruction""" + if _drive_count <= 36: + if _extent_count <= 600: + return 0.40 + elif _extent_count <= 1400: + return 0.35 + elif _extent_count <= 6200: + return 0.20 + elif _extent_count <= 50000: + return 0.15 + elif _drive_count <= 64: + if _extent_count <= 600: + return 0.20 + elif _extent_count <= 1400: + return 0.15 + elif _extent_count <= 6200: + return 0.10 + elif _extent_count <= 50000: + return 0.05 + elif _drive_count <= 480: + if _extent_count <= 600: + return 0.20 + elif _extent_count <= 1400: + return 0.15 + elif _extent_count <= 6200: + return 0.10 + elif _extent_count <= 50000: + return 0.05 + + self.module.fail_json(msg="Drive count exceeded the error percent table. Array[%s]" % self.ssid) + + def get_ddp_reserved_drive_count(_disk_count): + """Determine the number of reserved drive.""" + reserve_count = 0 + + if self.reserve_drive_count: + reserve_count = self.reserve_drive_count + elif _disk_count >= 256: + reserve_count = 8 + elif _disk_count >= 192: + reserve_count = 7 + elif _disk_count >= 128: + reserve_count = 6 + elif _disk_count >= 64: + reserve_count = 4 + elif _disk_count >= 32: + reserve_count = 3 + elif _disk_count >= 12: + reserve_count = 2 + elif _disk_count == 11: + reserve_count = 1 + + return reserve_count + + if self.pool_detail: + drive_count = len(self.storage_pool_drives) + len(expansion_drive_list) + else: + drive_count = len(expansion_drive_list) + + drive_usable_capacity = min(min(self.get_available_drive_capacities()), + min(self.get_available_drive_capacities(expansion_drive_list))) + drive_data_extents = ((drive_usable_capacity - 8053063680) / 536870912) + maximum_stripe_count = (drive_count * drive_data_extents) / 10 + + error_percent = get_ddp_error_percent(drive_count, drive_data_extents) + error_overhead = (drive_count * drive_data_extents / 10 * error_percent + 10) / 10 + + total_stripe_count = maximum_stripe_count - error_overhead + stripe_count_per_drive = total_stripe_count / drive_count + reserved_stripe_count = get_ddp_reserved_drive_count(drive_count) * stripe_count_per_drive + available_stripe_count = total_stripe_count - reserved_stripe_count + + return available_stripe_count * 4294967296 + + def get_candidate_drive_request(self): + """Perform request for new volume creation.""" + + candidates_list = list() + drive_types = [self.criteria_drive_type] if self.criteria_drive_type else self.available_drive_types + interface_types = [self.criteria_drive_interface_type] \ + if self.criteria_drive_interface_type else self.available_drive_interface_types + + for interface_type in interface_types: + for drive_type in drive_types: + candidates = None + volume_candidate_request_data = dict( + type="diskPool" if self.raid_level == "raidDiskPool" else "traditional", + diskPoolVolumeCandidateRequestData=dict( + reconstructionReservedDriveCount=65535)) + candidate_selection_type = dict( + candidateSelectionType="count", + driveRefList=dict(driveRef=self.available_drives)) + criteria = dict(raidLevel=self.raid_level, + phyDriveType=interface_type, + dssPreallocEnabled=False, + 
securityType="capable" if self.criteria_drive_require_fde else "none", + driveMediaType=drive_type, + onlyProtectionInformationCapable=True if self.criteria_drive_require_da else False, + volumeCandidateRequestData=volume_candidate_request_data, + allocateReserveSpace=False, + securityLevel="fde" if self.criteria_drive_require_fde else "none", + candidateSelectionType=candidate_selection_type) + + try: + rc, candidates = self.request("storage-systems/%s/symbol/getVolumeCandidates?verboseError" + "Response=true" % self.ssid, data=criteria, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]." + % (self.ssid, to_native(error))) + + if candidates: + candidates_list.extend(candidates["volumeCandidate"]) + + if candidates_list and not self.usable_drives: + def candidate_sort_function(entry): + """Orders candidates based on tray/drawer loss protection.""" + preference = 3 + if entry["drawerLossProtection"]: + preference -= 1 + if entry["trayLossProtection"]: + preference -= 2 + return preference + + candidates_list.sort(key=candidate_sort_function) + + # Replace drive selection with required usable drives + if self.usable_drives: + drives = self.convert_drives_list_into_drive_info_by_ids() + for candidates in candidates_list: + candidates["driveRefList"].update({"driveRef": drives[0:candidates["driveCount"]]}) + + return candidates_list + + @memoize + def get_candidate_drives(self): + """Retrieve set of drives candidates for creating a new storage pool.""" + for candidate in self.get_candidate_drive_request(): + + # Evaluate candidates for required drive count, collective drive usable capacity and minimum drive size + if self.criteria_drive_count: + if self.criteria_drive_count != int(candidate["driveCount"]): + continue + if self.criteria_min_usable_capacity: + if ((self.raid_level == "raidDiskPool" and self.criteria_min_usable_capacity > + self.get_ddp_capacity(candidate["driveRefList"]["driveRef"])) or + self.criteria_min_usable_capacity > int(candidate["usableSize"])): + continue + if self.criteria_drive_min_size: + if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["driveRefList"]["driveRef"])): + continue + if self.criteria_drive_max_size: + if self.criteria_drive_max_size < min(self.get_available_drive_capacities(candidate["driveRefList"]["driveRef"])): + continue + + return candidate + + self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid) + + @memoize + def get_expansion_candidate_drives(self): + """Retrieve required expansion drive list. + + Note: To satisfy the expansion criteria each item in the candidate list must added specified group since there + is a potential limitation on how many drives can be incorporated at a time. + * Traditional raid volume groups must be added two drives maximum at a time. No limits on raid disk pools. + + :return list(candidate): list of candidate structures from the getVolumeGroupExpansionCandidates symbol endpoint + """ + + def get_expansion_candidate_drive_request(): + """Perform the request for expanding existing volume groups or disk pools. + + Note: the list of candidate structures do not necessarily produce candidates that meet all criteria. 
+ """ + candidates_list = None + url = "storage-systems/%s/symbol/getVolumeGroupExpansionCandidates?verboseErrorResponse=true" % self.ssid + if self.raid_level == "raidDiskPool": + url = "storage-systems/%s/symbol/getDiskPoolExpansionCandidates?verboseErrorResponse=true" % self.ssid + + try: + rc, candidates_list = self.request(url, method="POST", data=self.pool_detail["id"]) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]." + % (self.ssid, to_native(error))) + + return candidates_list["candidates"] + + required_candidate_list = list() + required_additional_drives = 0 + required_additional_capacity = 0 + total_required_capacity = 0 + + # determine whether and how much expansion is need to satisfy the specified criteria + if self.criteria_min_usable_capacity: + total_required_capacity = self.criteria_min_usable_capacity + required_additional_capacity = self.criteria_min_usable_capacity - int(self.pool_detail["totalRaidedSpace"]) + + if self.criteria_drive_count: + required_additional_drives = self.criteria_drive_count - len(self.storage_pool_drives) + + # Determine the appropriate expansion candidate list + if required_additional_drives > 0 or required_additional_capacity > 0: + for candidate in get_expansion_candidate_drive_request(): + + if self.criteria_drive_min_size: + if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["drives"])): + continue + if self.criteria_drive_max_size: + if self.criteria_drive_max_size < min(self.get_available_drive_capacities(candidate["drives"])): + continue + + if self.raid_level == "raidDiskPool": + if (len(candidate["drives"]) >= required_additional_drives and + self.get_ddp_capacity(candidate["drives"]) >= total_required_capacity): + required_candidate_list.append(candidate) + break + else: + required_additional_drives -= len(candidate["drives"]) + required_additional_capacity -= int(candidate["usableCapacity"]) + required_candidate_list.append(candidate) + + # Determine if required drives and capacities are satisfied + if required_additional_drives <= 0 and required_additional_capacity <= 0: + break + else: + self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid) + + return required_candidate_list + + def get_reserve_drive_count(self): + """Retrieve the current number of reserve drives for raidDiskPool (Only for raidDiskPool).""" + + if not self.pool_detail: + self.module.fail_json(msg="The storage pool must exist. Array [%s]." % self.ssid) + + if self.raid_level != "raidDiskPool": + self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]." + % (self.pool_detail["id"], self.ssid)) + + return self.pool_detail["volumeGroupData"]["diskPoolData"]["reconstructionReservedDriveCount"] + + def get_maximum_reserve_drive_count(self): + """Retrieve the maximum number of reserve drives for storage pool (Only for raidDiskPool).""" + if self.raid_level != "raidDiskPool": + self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]." 
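+            # Editor's worked example (hypothetical values) of the accumulation loop in
+            # get_expansion_candidate_drives() above: with criteria_drive_count=12 and 8 drives
+            # currently in the pool, required_additional_drives starts at 4; candidates holding
+            # 2 drives each are appended until both shortfalls are satisfied:
+            #     after candidate 1: 4 - 2 == 2 drives still required
+            #     after candidate 2: 2 - 2 == 0 -> criteria satisfied, loop breaks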
+ % (self.pool_detail["id"], self.ssid)) + + drives_ids = list() + + if self.pool_detail: + drives_ids.extend(self.storage_pool_drives) + for candidate in self.get_expansion_candidate_drives(): + drives_ids.extend((candidate["drives"])) + else: + candidate = self.get_candidate_drives() + drives_ids.extend(candidate["driveRefList"]["driveRef"]) + + drive_count = len(drives_ids) + maximum_reserve_drive_count = min(int(drive_count * 0.2 + 1), drive_count - 10) + if maximum_reserve_drive_count > 10: + maximum_reserve_drive_count = 10 + + return maximum_reserve_drive_count + + def set_reserve_drive_count(self, check_mode=False): + """Set the reserve drive count for raidDiskPool.""" + changed = False + + if self.raid_level == "raidDiskPool" and self.reserve_drive_count: + maximum_count = self.get_maximum_reserve_drive_count() + + if self.reserve_drive_count < 0 or self.reserve_drive_count > maximum_count: + self.module.fail_json(msg="Supplied reserve drive count is invalid or exceeds the maximum allowed. " + "Note that it may be necessary to wait for expansion operations to complete " + "before the adjusting the reserve drive count. Maximum [%s]. Array [%s]." + % (maximum_count, self.ssid)) + + if self.reserve_drive_count != self.get_reserve_drive_count(): + changed = True + + if not check_mode: + try: + rc, resp = self.request("storage-systems/%s/symbol/setDiskPoolReservedDriveCount" % self.ssid, + method="POST", data=dict(volumeGroupRef=self.pool_detail["id"], + newDriveCount=self.reserve_drive_count)) + except Exception as error: + self.module.fail_json(msg="Failed to set reserve drive count for disk pool. Disk Pool [%s]." + " Array [%s]." % (self.pool_detail["id"], self.ssid)) + + return changed + + def erase_all_available_secured_drives(self, check_mode=False): + """Erase all available drives that have encryption at rest feature enabled.""" + changed = False + drives_list = list() + for drive in self.drives: + if drive["available"] and drive["fdeEnabled"]: + changed = True + drives_list.append(drive["id"]) + + if drives_list and not check_mode: + try: + rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true" + % self.ssid, method="POST", data=dict(driveRef=drives_list)) + except Exception as error: + self.module.fail_json(msg="Failed to erase all secured drives. Array [%s]" % self.ssid) + + return changed + + def create_storage_pool(self): + """Create new storage pool.""" + url = "storage-systems/%s/symbol/createVolumeGroup?verboseErrorResponse=true" % self.ssid + request_body = dict(label=self.name, + candidate=self.get_candidate_drives()) + + if self.raid_level == "raidDiskPool": + url = "storage-systems/%s/symbol/createDiskPool?verboseErrorResponse=true" % self.ssid + + request_body.update( + dict(backgroundOperationPriority="useDefault", + criticalReconstructPriority="useDefault", + degradedReconstructPriority="useDefault", + poolUtilizationCriticalThreshold=self.ddp_critical_threshold_pct, + poolUtilizationWarningThreshold=self.ddp_warning_threshold_pct)) + + if self.reserve_drive_count: + request_body.update(dict(volumeCandidateData=dict( + diskPoolVolumeCandidateData=dict(reconstructionReservedDriveCount=self.reserve_drive_count)))) + + try: + rc, resp = self.request(url, method="POST", data=request_body) + except Exception as error: + self.module.fail_json(msg="Failed to create storage pool. Array id [%s]. Error [%s]." 
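+        # Editor's worked example (hypothetical drive counts) for
+        # get_maximum_reserve_drive_count() above, where the ceiling is
+        # min(int(drive_count * 0.2 + 1), drive_count - 10), capped at 10:
+        #     24 drives -> min(int(5.8), 14) == 5
+        #     64 drives -> min(int(13.8), 54) == 13 -> capped to 10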
+ % (self.ssid, to_native(error))) + + # Update drive and storage pool information + self.pool_detail = self.storage_pool + + def delete_storage_pool(self): + """Delete storage pool.""" + storage_pool_drives = [drive["id"] for drive in self.storage_pool_drives if drive["fdeEnabled"]] + try: + delete_volumes_parameter = "?delete-volumes=true" if self.remove_volumes else "" + rc, resp = self.request("storage-systems/%s/storage-pools/%s%s" + % (self.ssid, self.pool_detail["id"], delete_volumes_parameter), method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error [%s]." + % (self.pool_detail["id"], self.ssid, to_native(error))) + + if storage_pool_drives and self.erase_secured_drives: + try: + rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true" + % self.ssid, method="POST", data=dict(driveRef=storage_pool_drives)) + except Exception as error: + self.module.fail_json(msg="Failed to erase drives prior to creating new storage pool. Array [%s]." + " Error [%s]." % (self.ssid, to_native(error))) + + def secure_storage_pool(self, check_mode=False): + """Enable security on an existing storage pool""" + self.pool_detail = self.storage_pool + needs_secure_pool = False + + if not self.secure_pool and self.pool_detail["securityType"] == "enabled": + self.module.fail_json(msg="It is not possible to disable storage pool security! See array documentation.") + if self.secure_pool and self.pool_detail["securityType"] != "enabled": + needs_secure_pool = True + + if needs_secure_pool and not check_mode: + try: + rc, resp = self.request("storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail["id"]), + data=dict(securePool=True), method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to secure storage pool. Pool id [%s]. Array [%s]. Error" + " [%s]." % (self.pool_detail["id"], self.ssid, to_native(error))) + + self.pool_detail = self.storage_pool + return needs_secure_pool + + def migrate_raid_level(self, check_mode=False): + """Request storage pool raid level migration.""" + needs_migration = self.raid_level != self.pool_detail["raidLevel"] + if needs_migration and self.pool_detail["raidLevel"] == "raidDiskPool": + self.module.fail_json(msg="Raid level cannot be changed for disk pools") + + if needs_migration and not check_mode: + sp_raid_migrate_req = dict(raidLevel=self.raid_level) + + try: + rc, resp = self.request("storage-systems/%s/storage-pools/%s/raid-type-migration" + % (self.ssid, self.name), data=sp_raid_migrate_req, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to change the raid level of storage pool. Array id [%s]." + " Error [%s]." 
% (self.ssid, to_native(error)))
+
+        self.pool_detail = self.storage_pool
+        return needs_migration
+
+    def update_ddp_settings(self, check_mode=False):
+        """Update dynamic disk pool settings."""
+        if self.raid_level != "raidDiskPool":
+            return False
+
+        needs_update = False
+        if (self.pool_detail["volumeGroupData"]["diskPoolData"]["poolUtilizationWarningThreshold"] != self.ddp_warning_threshold_pct or
+                self.pool_detail["volumeGroupData"]["diskPoolData"]["poolUtilizationCriticalThreshold"] != self.ddp_critical_threshold_pct):
+            needs_update = True
+
+        if needs_update and not check_mode:
+            if self.pool_detail["volumeGroupData"]["diskPoolData"]["poolUtilizationWarningThreshold"] != self.ddp_warning_threshold_pct:
+                try:
+                    rc, update = self.request("storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail["id"]), method="POST",
+                                              data={"id": self.pool_detail["id"],
+                                                    "poolThreshold": {"thresholdType": "warning", "value": self.ddp_warning_threshold_pct}})
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to update DDP warning alert threshold! Pool [%s]. Array [%s]."
+                                              " Error [%s]" % (self.name, self.ssid, to_native(error)))
+
+            if self.pool_detail["volumeGroupData"]["diskPoolData"]["poolUtilizationCriticalThreshold"] != self.ddp_critical_threshold_pct:
+                try:
+                    rc, update = self.request("storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail["id"]), method="POST",
+                                              data={"id": self.pool_detail["id"],
+                                                    "poolThreshold": {"thresholdType": "critical", "value": self.ddp_critical_threshold_pct}})
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to update DDP critical alert threshold! Pool [%s]. Array [%s]."
+                                              " Error [%s]" % (self.name, self.ssid, to_native(error)))
+        return needs_update
+
+    def expand_storage_pool(self, check_mode=False):
+        """Add drives to existing storage pool.
+
+        :return (bool, float): whether drives were required to be added to satisfy the specified criteria, and the
+        estimated expansion completion time in minutes."""
+        expansion_candidate_list = self.get_expansion_candidate_drives()
+        changed_required = bool(expansion_candidate_list)
+        estimated_completion_time = 0.0
+
+        # build expandable groupings of traditional raid candidates
+        required_expansion_candidate_list = list()
+        while expansion_candidate_list:
+            subset = list()
+            while expansion_candidate_list and len(subset) < self.expandable_drive_count:
+                subset.extend(expansion_candidate_list.pop()["drives"])
+            required_expansion_candidate_list.append(subset)
+
+        if required_expansion_candidate_list and not check_mode:
+            url = "storage-systems/%s/symbol/startVolumeGroupExpansion?verboseErrorResponse=true" % self.ssid
+            if self.raid_level == "raidDiskPool":
+                url = "storage-systems/%s/symbol/startDiskPoolExpansion?verboseErrorResponse=true" % self.ssid
+
+            while required_expansion_candidate_list:
+                candidate_drives_list = required_expansion_candidate_list.pop()
+                request_body = dict(volumeGroupRef=self.pool_detail["volumeGroupRef"],
+                                    driveRef=candidate_drives_list)
+                try:
+                    rc, resp = self.request(url, method="POST", data=request_body)
+                except Exception as error:
+                    rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress"
+                                                    % (self.ssid, self.pool_detail["id"]), ignore_errors=True)
+                    if rc == 200 and actions_resp:
+                        actions = [action["currentAction"] for action in actions_resp
+                                   if action["volumeRef"] in self.storage_pool_volumes]
+                        self.module.fail_json(msg="Failed to add drives to the storage pool possibly because of actions"
+                                                  " in progress. Actions [%s]. Pool id [%s]. Array id [%s]. Error [%s]."
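+                        # Editor's illustration (hypothetical candidates) of the batching built
+                        # earlier in this method: with expandable_drive_count == 2 and three
+                        # one-drive candidates [A], [B], [C], the subsets become
+                        #     [[C, B], [A]]
+                        # and each subset is submitted as a separate expansion request.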
+ % (", ".join(actions), self.pool_detail["id"], self.ssid, + to_native(error))) + + self.module.fail_json(msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]." + " Error [%s]." % (self.pool_detail["id"], self.ssid, to_native(error))) + + # Wait for expansion completion unless it is the last request in the candidate list + if required_expansion_candidate_list: + for dummy in range(self.EXPANSION_TIMEOUT_SEC): + rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress" + % (self.ssid, self.pool_detail["id"]), ignore_errors=True) + if rc == 200: + for action in actions_resp: + if (action["volumeRef"] in self.storage_pool_volumes and + action["currentAction"] == "remappingDce"): + sleep(1) + estimated_completion_time = action["estimatedTimeToCompletion"] + break + else: + estimated_completion_time = 0.0 + break + + return changed_required, estimated_completion_time + + def apply(self): + """Apply requested state to storage array.""" + changed = False + + if self.state == "present": + if self.criteria_drive_count is None and self.criteria_min_usable_capacity is None: + self.module.fail_json(msg="One of criteria_min_usable_capacity or criteria_drive_count must be" + " specified.") + if self.criteria_drive_count and not self.is_drive_count_valid(self.criteria_drive_count): + self.module.fail_json(msg="criteria_drive_count must be valid for the specified raid level.") + + self.pool_detail = self.storage_pool + self.module.log(pformat(self.pool_detail)) + + if self.state == "present" and self.erase_secured_drives: + self.erase_all_available_secured_drives(check_mode=True) + + # Determine whether changes need to be applied to the storage array + if self.pool_detail: + + if self.state == "absent": + changed = True + + elif self.state == "present": + + if self.criteria_drive_count and self.criteria_drive_count < len(self.storage_pool_drives): + self.module.fail_json(msg="Failed to reduce the size of the storage pool. Array [%s]. Pool [%s]." + % (self.ssid, self.pool_detail["id"])) + + if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail["driveMediaType"]: + self.module.fail_json(msg="Failed! It is not possible to modify storage pool media type." + " Array [%s]. Pool [%s]." % (self.ssid, self.pool_detail["id"])) + + if (self.criteria_drive_require_da is not None and self.criteria_drive_require_da != + self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"]): + self.module.fail_json(msg="Failed! It is not possible to modify DA-capability. Array [%s]." + " Pool [%s]." % (self.ssid, self.pool_detail["id"])) + + # Evaluate current storage pool for required change. + needs_expansion, estimated_completion_time = self.expand_storage_pool(check_mode=True) + if needs_expansion: + changed = True + if self.migrate_raid_level(check_mode=True): + changed = True + if self.secure_storage_pool(check_mode=True): + changed = True + if self.set_reserve_drive_count(check_mode=True): + changed = True + if self.update_ddp_settings(check_mode=True): + changed = True + + elif self.state == "present": + changed = True + + # Apply changes to storage array + msg = "No changes were required for the storage pool [%s]." + if changed and not self.module.check_mode: + if self.state == "present": + if self.erase_secured_drives: + self.erase_all_available_secured_drives() + + if self.pool_detail: + change_list = list() + + # Expansion needs to occur before raid level migration to account for any sizing needs. 
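+                    # Editor's note (ordering sketch, hypothetical scenario): expanding first
+                    # ensures, for example, that a raid5 group gaining a parity drive has the
+                    # capacity in place before a raid5 -> raid6 migration is requested:
+                    #     self.expand_storage_pool()   # add drives first
+                    #     self.migrate_raid_level()    # then request the raid-level change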
+                    expanded, estimated_completion_time = self.expand_storage_pool()
+                    if expanded:
+                        change_list.append("expanded")
+                    if self.migrate_raid_level():
+                        change_list.append("raid migration")
+                    if self.secure_storage_pool():
+                        change_list.append("secured")
+                    if self.set_reserve_drive_count():
+                        change_list.append("adjusted reserve drive count")
+
+                    if self.update_ddp_settings():
+                        change_list.append("updated ddp settings")
+
+                    if change_list:
+                        msg = "The following changes have been applied to the storage pool [%s]: " + ", ".join(change_list)
+
+                        if expanded:
+                            msg += "\nThe expansion operation will complete in an estimated %s minutes." % estimated_completion_time
+                else:
+                    self.create_storage_pool()
+                    msg = "Storage pool [%s] was created."
+
+                    if self.secure_storage_pool():
+                        msg = "Storage pool [%s] was created and secured."
+                    if self.set_reserve_drive_count():
+                        msg += " Adjusted reserve drive count."
+
+            elif self.pool_detail:
+                self.delete_storage_pool()
+                msg = "Storage pool [%s] removed."
+
+        self.pool_detail = self.storage_pool
+        self.module.log(pformat(self.pool_detail))
+        self.module.log(msg % self.name)
+        self.module.exit_json(msg=msg % self.name, changed=changed, **self.pool_detail)
+
+
+def main():
+    storage_pool = NetAppESeriesStoragePool()
+    storage_pool.apply()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_syslog.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_syslog.py
new file mode 100644
index 000000000..212957ead
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_syslog.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+
+# (c) 2020, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: na_santricity_syslog
+short_description: NetApp E-Series manage syslog settings
+description:
+    - Allow the syslog settings to be configured for an individual E-Series storage-system
+author: Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.santricity_doc
+options:
+  state:
+    description:
+      - Add or remove the syslog server configuration for E-Series storage array.
+      - Existing syslog server configuration will be removed or updated when its address matches I(address).
+      - Fully qualified hostnames that resolve to an IPv4 address matching I(address) will not be
+        treated as a match.
+    type: str
+    choices:
+      - present
+      - absent
+    default: present
+    required: false
+  address:
+    description:
+      - The syslog server's IPv4 address or a fully qualified hostname.
+      - All existing syslog configurations will be removed when I(state=absent) and I(address=None).
+    type: str
+    required: false
+  port:
+    description:
+      - This is the port the syslog server is using.
+    type: int
+    default: 514
+    required: false
+  protocol:
+    description:
+      - This is the transmission protocol the syslog server is using to receive syslog messages.
+    type: str
+    default: udp
+    choices:
+      - udp
+      - tcp
+      - tls
+    required: false
+  components:
+    description:
+      - The E-Series logging components define the specific logs to transfer to the syslog server.
+      - At the time of writing, 'auditLog' is the only logging component but more may become available.
+    type: list
+    default: ["auditLog"]
+    required: false
+  test:
+    description:
+      - This forces a test syslog message to be sent to the stated syslog server.
+      - Only attempts transmission when I(state=present).
+    type: bool
+    default: false
+    required: false
+notes:
+  - Check mode is supported.
+  - This API is currently only supported with the Embedded Web Services API v2.12 (bundled with
+    SANtricity OS 11.40.2) and higher.
+"""

+EXAMPLES = """
+    - name: Add two syslog server configurations to NetApp E-Series storage array.
+      na_santricity_syslog:
+        ssid: "1"
+        api_url: "https://192.168.1.100:8443/devmgr/v2"
+        api_username: "admin"
+        api_password: "adminpass"
+        validate_certs: true
+        state: present
+        address: "{{ item }}"
+        port: 514
+        protocol: tcp
+        components: "auditLog"
+      loop:
+        - "192.168.1.1"
+        - "192.168.1.100"
+"""

+RETURN = """
+msg:
+    description: Success message
+    returned: on success
+    type: str
+    sample: The settings have been updated.
+syslog:
+    description:
+        - True if syslog server configuration has been added to E-Series storage array.
+    returned: on success
+    sample: True
+    type: bool
+"""
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
+from ansible.module_utils._text import to_native


+class NetAppESeriesSyslog(NetAppESeriesModule):
+    def __init__(self):
+        ansible_options = dict(
+            state=dict(choices=["present", "absent"], required=False, default="present"),
+            address=dict(type="str", required=False),
+            port=dict(type="int", default=514, required=False),
+            protocol=dict(choices=["tcp", "tls", "udp"], default="udp", required=False),
+            components=dict(type="list", required=False, default=["auditLog"]),
+            test=dict(type="bool", default=False, required=False))

+        required_if = [["state", "present", ["address", "port", "protocol", "components"]]]
+        mutually_exclusive = [["test", "absent"]]
+        super(NetAppESeriesSyslog, self).__init__(ansible_options=ansible_options,
+                                                  web_services_version="02.00.0000.0000",
+                                                  mutually_exclusive=mutually_exclusive,
+                                                  required_if=required_if,
+                                                  supports_check_mode=True)
+        args = self.module.params

+        self.syslog = args["state"] in ["present"]
+        self.address = args["address"]
+        self.port = args["port"]
+        self.protocol = args["protocol"]
+        self.components = args["components"]
+        self.test = args["test"]
+        self.ssid = args["ssid"]
+        self.url = args["api_url"]
+        self.creds = dict(url_password=args["api_password"],
+                          validate_certs=args["validate_certs"],
+                          url_username=args["api_username"], )

+        self.components.sort()
+        self.check_mode = self.module.check_mode

+        # Check whether request needs to be forwarded on to the controller web services rest api.
+        self.url_path_prefix = ""
+        if not self.is_embedded() and self.ssid != "0" and self.ssid.lower() != "proxy":
+            self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid

+    def get_configuration(self):
+        """Retrieve existing syslog configuration."""
+        try:
+            rc, result = self.request(self.url_path_prefix + "storage-systems/%s/syslog" % self.ssid)
+            return result
+        except Exception as err:
+            self.module.fail_json(msg="Failed to retrieve syslog configuration! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

+    def test_configuration(self, body):
+        """Send test syslog message to the storage array.

+        Allows a fixed number of retries to occur before failure is issued, giving the storage array time to create
+        the new syslog server record.
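+
+        Editorial sketch of the retry idea described above; the method below currently issues a
+        single request, so this loop (and the test_url and sleep names) is a hypothetical
+        illustration only::
+
+            for attempt in range(3):      # hypothetical retry count
+                try:
+                    rc, result = self.request(test_url, method="POST")
+                    break
+                except Exception:
+                    sleep(5)              # give the array time to create the record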
+ """ + try: + rc, result = self.request(self.url_path_prefix + "storage-systems/%s/syslog/%s/test" % (self.ssid, body["id"]), method='POST') + except Exception as err: + self.module.fail_json(msg="We failed to send test message! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + def update_configuration(self): + """Post the syslog request to array.""" + config_match = None + perfect_match = None + update = False + body = dict() + + # search existing configuration for syslog server entry match + configs = self.get_configuration() + if self.address: + for config in configs: + if config["serverAddress"] == self.address: + config_match = config + if (config["port"] == self.port and config["protocol"] == self.protocol and + len(config["components"]) == len(self.components) and + all([component["type"] in self.components for component in config["components"]])): + perfect_match = config_match + break + + # generate body for the http request + if self.syslog: + if not perfect_match: + update = True + if config_match: + body.update(dict(id=config_match["id"])) + components = [dict(type=component_type) for component_type in self.components] + body.update(dict(serverAddress=self.address, port=self.port, + protocol=self.protocol, components=components)) + self.make_configuration_request(body) + + elif config_match: + + # remove specific syslog server configuration + if self.address: + update = True + body.update(dict(id=config_match["id"])) + self.make_configuration_request(body) + + # if no address is specified, remove all syslog server configurations + elif configs: + update = True + for config in configs: + body.update(dict(id=config["id"])) + self.make_configuration_request(body) + + return update + + def make_configuration_request(self, body): + # make http request(s) + if not self.check_mode: + try: + if self.syslog: + if "id" in body: + rc, result = self.request(self.url_path_prefix + "storage-systems/%s/syslog/%s" % (self.ssid, body["id"]), + method='POST', data=body) + else: + rc, result = self.request(self.url_path_prefix + "storage-systems/%s/syslog" % self.ssid, method='POST', data=body) + body.update(result) + + # send syslog test message + if self.test: + self.test_configuration(body) + + elif "id" in body: + rc, result = self.request(self.url_path_prefix + "storage-systems/%s/syslog/%s" % (self.ssid, body["id"]), method='DELETE') + + # This is going to catch cases like a connection failure + except Exception as err: + self.module.fail_json(msg="We failed to modify syslog configuration! Array Id [%s]. Error [%s]." 
% (self.ssid, to_native(err))) + + def update(self): + """Update configuration and respond to ansible.""" + update = self.update_configuration() + self.module.exit_json(msg="The syslog settings have been updated.", changed=update) + + +def main(): + settings = NetAppESeriesSyslog() + settings.update() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_volume.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_volume.py new file mode 100644 index 000000000..3a3552ff3 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/na_santricity_volume.py @@ -0,0 +1,945 @@ +#!/usr/bin/python + +# (c) 2020, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = """ +--- +module: na_santricity_volume +short_description: NetApp E-Series manage storage volumes (standard and thin) +description: + - Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays. +author: + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.santricity_doc +options: + state: + description: + - Whether the specified volume should exist + type: str + choices: ["present", "absent"] + default: "present" + name: + description: + - The name of the volume to manage. + type: str + required: true + storage_pool_name: + description: + - Required only when requested I(state=="present"). + - Name of the storage pool wherein the volume should reside. + type: str + required: false + size_unit: + description: + - The unit used to interpret the size parameter + - pct unit defines a percent of total usable storage pool size. + type: str + choices: ["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb", "pct"] + default: "gb" + size: + description: + - Required only when I(state=="present"). + - Size of the volume in I(size_unit). + - Size of the virtual volume in the case of a thin volume in I(size_unit). + - Maximum virtual volume size of a thin provisioned volume is 256tb; however other OS-level restrictions may exist. + type: float + required: true + size_tolerance_b: + description: + - Tolerance for total volume size measured in bytes; so, if the total volumes size is within + +/- I(size_tolerance_b) then no resizing will be expected. + - This parameter can be useful in the case of existing volumes not created by na_santricity_volume + since providing the exact size can be difficult due to volume alignment and overhead. + type: int + required: false + default: 10485760 + segment_size_kb: + description: + - Segment size of the volume + - All values are in kibibytes. + - Some common choices include 8, 16, 32, 64, 128, 256, and 512 but options are system + dependent. + - Retrieve the definitive system list from M(na_santricity_facts) under segment_sizes. + - When the storage pool is a raidDiskPool then the segment size must be 128kb. + - Segment size migrations are not allowed in this module + type: int + default: 128 + thin_provision: + description: + - Whether the volume should be thin provisioned. + - Thin volumes can only be created when I(raid_level=="raidDiskPool"). + - Generally, use of thin-provisioning is not recommended due to performance impacts. 
+    type: bool
+    default: false
+    required: false
+  thin_volume_repo_size:
+    description:
+      - This value (in size_unit) sets the allocated space for the thin provisioned repository.
+      - The initial value must be between 4gb and 256gb, inclusive, in increments of 4gb.
+      - During expansion operations the increase must be between 4gb and 256gb, inclusive, in increments of 4gb.
+      - This option has no effect during expansion if I(thin_volume_expansion_policy=="automatic").
+      - Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic").
+    type: int
+    required: false
+  thin_volume_max_repo_size:
+    description:
+      - This is the maximum amount the thin volume repository will be allowed to grow.
+      - Only has significance when I(thin_volume_expansion_policy=="automatic").
+      - When I(thin_volume_repo_size), expressed as a percent of I(thin_volume_max_repo_size), exceeds
+        I(thin_volume_growth_alert_threshold), a warning will be issued and the storage array will execute
+        the I(thin_volume_expansion_policy) policy.
+      - Expansion operations when I(thin_volume_expansion_policy=="automatic") will increase the maximum
+        repository size.
+      - Default will be the same as I(size).
+    type: float
+    required: false
+  thin_volume_expansion_policy:
+    description:
+      - This is the thin volume expansion policy.
+      - When I(thin_volume_expansion_policy=="automatic") and I(thin_volume_growth_alert_threshold) is exceeded, the
+        I(thin_volume_max_repo_size) will be automatically expanded.
+      - When I(thin_volume_expansion_policy=="manual") and I(thin_volume_growth_alert_threshold) is exceeded, the
+        storage system will wait for manual intervention.
+      - The thin volume expansion policy cannot be modified on existing thin volumes in this module.
+      - Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic").
+    type: str
+    choices: ["automatic", "manual"]
+    default: "automatic"
+    required: false
+  thin_volume_growth_alert_threshold:
+    description:
+      - This is the thin provision repository utilization threshold (in percent).
+      - When the percent of used storage of the maximum repository size exceeds this value, an alert will
+        be issued and the I(thin_volume_expansion_policy) will be executed.
+      - Values must be between 10 and 99, inclusive.
+    type: int
+    default: 95
+    required: false
+  owning_controller:
+    description:
+      - Specifies which controller will be the primary owner of the volume.
+      - Not specifying will allow the controller to choose ownership.
+    type: str
+    choices: ["A", "B"]
+    required: false
+  ssd_cache_enabled:
+    description:
+      - Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache is defined).
+      - The default value is to ignore existing SSD cache setting.
+    type: bool
+    default: false
+    required: false
+  data_assurance_enabled:
+    description:
+      - Determines whether data assurance (DA) should be enabled for the volume.
+      - Only available when creating a new volume and on a storage pool with drives supporting the DA capability.
+    type: bool
+    default: false
+    required: false
+  read_cache_enable:
+    description:
+      - Indicates whether read caching should be enabled for the volume.
+    type: bool
+    default: true
+    required: false
+  read_ahead_enable:
+    description:
+      - Indicates whether or not automatic cache read-ahead is enabled.
+      - This option has no effect on thinly provisioned volumes since the architecture for thin volumes cannot
+        benefit from read ahead caching.
+    type: bool
+    default: true
+    required: false
+  write_cache_enable:
+    description:
+      - Indicates whether write-back caching should be enabled for the volume.
+    type: bool
+    default: true
+    required: false
+  write_cache_mirror_enable:
+    description:
+      - Indicates whether write cache mirroring should be enabled.
+    type: bool
+    default: true
+    required: false
+  cache_without_batteries:
+    description:
+      - Indicates whether caching should be used without battery backup.
+      - Warning, if I(cache_without_batteries==true) and the storage system loses power with no battery backup, data will be lost!
+    type: bool
+    default: false
+    required: false
+  workload_name:
+    description:
+      - Label for the workload defined by the metadata.
+      - When I(workload_name) and I(metadata) are specified then the defined workload will be added to the storage
+        array.
+      - When I(workload_name) exists on the storage array but the metadata is different then the workload
+        definition will be updated. (Changes will update all associated volumes!)
+      - Existing workloads can be retrieved using M(na_santricity_facts).
+    type: str
+    required: false
+  workload_metadata:
+    description:
+      - Dictionary containing metadata for the use, user, location, etc. of the volume (the dictionary is arbitrarily
+        defined for whatever the user deems useful).
+      - When I(workload_name) exists on the storage array but the metadata is different then the workload
+        definition will be updated. (Changes will update all associated volumes!)
+      - I(workload_name) must be specified when I(metadata) is defined.
+      - Dictionary keys cannot be longer than 16 characters.
+      - Dictionary values cannot be longer than 60 characters.
+    type: dict
+    required: false
+    aliases:
+      - metadata
+  volume_metadata:
+    description:
+      - Dictionary containing metadata for the volume itself.
+      - Dictionary keys cannot be longer than 14 characters.
+      - Dictionary values cannot be longer than 240 characters.
+    type: dict
+    required: false
+  allow_expansion:
+    description:
+      - Allows volume size to expand to meet the required specification.
+      - Warning, when I(allow_expansion==false) and the existing volume needs to be expanded, the module will continue with a warning.
+    type: bool
+    default: false
+    required: false
+  wait_for_initialization:
+    description:
+      - Forces the module to wait for volume initialization or expansion operations to complete before continuing.
+ type: bool + default: false + required: false +""" +EXAMPLES = """ +- name: Create simple volume with workload tags (volume meta data) + na_santricity_volume: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + state: present + name: volume + storage_pool_name: storage_pool + size: 300 + size_unit: gb + workload_name: volume_tag + metadata: + key1: value1 + key2: value2 + +- name: Create a thin volume + na_santricity_volume: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + state: present + name: volume1 + storage_pool_name: storage_pool + size: 131072 + size_unit: gb + thin_provision: true + thin_volume_repo_size: 32 + thin_volume_max_repo_size: 1024 + +- name: Expand thin volume's virtual size + na_santricity_volume: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + state: present + name: volume1 + storage_pool_name: storage_pool + size: 262144 + size_unit: gb + thin_provision: true + thin_volume_repo_size: 32 + thin_volume_max_repo_size: 1024 + +- name: Expand thin volume's maximum repository size + na_santricity_volume: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + state: present + name: volume1 + storage_pool_name: storage_pool + size: 262144 + size_unit: gb + thin_provision: true + thin_volume_repo_size: 32 + thin_volume_max_repo_size: 2048 + +- name: Delete volume + na_santricity_volume: + ssid: "1" + api_url: "https://192.168.1.100:8443/devmgr/v2" + api_username: "admin" + api_password: "adminpass" + validate_certs: true + state: absent + name: volume +""" +RETURN = """ +msg: + description: State of volume + type: str + returned: always + sample: "Standard volume [workload_vol_1] has been created." 
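+# Editorial example (not a documented return value): a playbook may register the module
+# result and act on the message, e.g.
+#   - na_santricity_volume:
+#       # ... arguments as in the examples above ...
+#     register: volume_result
+#   - ansible.builtin.debug:
+#       var: volume_result.msg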
+""" + +import time + +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule +from ansible.module_utils._text import to_native + + +class NetAppESeriesVolume(NetAppESeriesModule): + VOLUME_CREATION_BLOCKING_TIMEOUT_SEC = 300 + MAXIMUM_VOLUME_METADATA_KEY_LENGTH = 14 + MAXIMUM_VOLUME_METADATA_VALUE_LENGTH = 240 + MAXIMUM_VOLUME_METADATA_VALUE_SEGMENT_LENGTH = 60 + + def __init__(self): + ansible_options = dict( + state=dict(choices=["present", "absent"], default="present"), + name=dict(required=True, type="str"), + storage_pool_name=dict(type="str"), + size_unit=dict(default="gb", choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb", "pct"], type="str"), + size=dict(type="float"), + size_tolerance_b=dict(type="int", required=False, default=10485760), + segment_size_kb=dict(type="int", default=128, required=False), + owning_controller=dict(type="str", choices=["A", "B"], required=False), + ssd_cache_enabled=dict(type="bool", default=False), + data_assurance_enabled=dict(type="bool", default=False), + thin_provision=dict(type="bool", default=False), + thin_volume_repo_size=dict(type="int", required=False), + thin_volume_max_repo_size=dict(type="float", required=False), + thin_volume_expansion_policy=dict(type="str", choices=["automatic", "manual"], default="automatic", required=False), + thin_volume_growth_alert_threshold=dict(type="int", default=95), + read_cache_enable=dict(type="bool", default=True), + read_ahead_enable=dict(type="bool", default=True), + write_cache_enable=dict(type="bool", default=True), + write_cache_mirror_enable=dict(type="bool", default=True), + cache_without_batteries=dict(type="bool", default=False), + workload_name=dict(type="str", required=False), + workload_metadata=dict(type="dict", require=False, aliases=["metadata"]), + volume_metadata=dict(type="dict", require=False), + allow_expansion=dict(type="bool", default=False), + wait_for_initialization=dict(type="bool", default=False)) + + required_if = [ + ["state", "present", ["storage_pool_name", "size"]], + ["thin_provision", "true", ["thin_volume_repo_size"]] + ] + + super(NetAppESeriesVolume, self).__init__(ansible_options=ansible_options, + web_services_version="02.00.0000.0000", + supports_check_mode=True, + required_if=required_if) + + args = self.module.params + self.state = args["state"] + self.name = args["name"] + self.storage_pool_name = args["storage_pool_name"] + self.size_unit = args["size_unit"] + self.size_tolerance_b = args["size_tolerance_b"] + self.segment_size_kb = args["segment_size_kb"] + + if args["size"]: + if self.size_unit == "pct": + if args["thin_provision"]: + self.module.fail_json(msg="'pct' is an invalid size unit for thin provisioning! Array [%s]." 
% self.ssid)
+                self.size_percent = args["size"]
+            else:
+                self.size_b = self.convert_to_aligned_bytes(args["size"])

+        self.owning_controller_id = None
+        if args["owning_controller"]:
+            self.owning_controller_id = "070000000000000000000001" if args["owning_controller"] == "A" else "070000000000000000000002"

+        self.read_cache_enable = args["read_cache_enable"]
+        self.read_ahead_enable = args["read_ahead_enable"]
+        self.write_cache_enable = args["write_cache_enable"]
+        self.write_cache_mirror_enable = args["write_cache_mirror_enable"]
+        self.ssd_cache_enabled = args["ssd_cache_enabled"]
+        self.cache_without_batteries = args["cache_without_batteries"]
+        self.data_assurance_enabled = args["data_assurance_enabled"]

+        self.thin_provision = args["thin_provision"]
+        self.thin_volume_expansion_policy = args["thin_volume_expansion_policy"]
+        self.thin_volume_growth_alert_threshold = int(args["thin_volume_growth_alert_threshold"])
+        self.thin_volume_repo_size_b = None
+        self.thin_volume_max_repo_size_b = None

+        if args["thin_volume_repo_size"]:
+            self.thin_volume_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_repo_size"])
+        if args["thin_volume_max_repo_size"]:
+            self.thin_volume_max_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_max_repo_size"])

+        self.workload_name = args["workload_name"]
+        self.allow_expansion = args["allow_expansion"]
+        self.wait_for_initialization = args["wait_for_initialization"]

+        # convert metadata to a list of dictionaries containing the keys "key" and "value" corresponding to
+        # each of the workload attributes dictionary entries
+        self.metadata = []
+        if self.state == "present" and args["workload_metadata"]:
+            if not self.workload_name:
+                self.module.fail_json(msg="When workload metadata is specified, the workload name must also be specified. Array [%s]." % self.ssid)

+            for key, value in args["workload_metadata"].items():
+                self.metadata.append({"key": key, "value": value})

+        self.volume_metadata = []
+        if self.state == "present" and args["volume_metadata"]:
+            for key, value in args["volume_metadata"].items():
+                key, value = str(key), str(value)

+                if len(key) > self.MAXIMUM_VOLUME_METADATA_KEY_LENGTH:
+                    self.module.fail_json(msg="Volume metadata keys must not be longer than %s characters. Array [%s]."
+                                              % (str(self.MAXIMUM_VOLUME_METADATA_KEY_LENGTH), self.ssid))

+                if len(value) > self.MAXIMUM_VOLUME_METADATA_VALUE_LENGTH:
+                    self.module.fail_json(msg="Volume metadata values must not be longer than %s characters. Array [%s]."
+                                              % (str(self.MAXIMUM_VOLUME_METADATA_VALUE_LENGTH), self.ssid))

+                if value:
+                    for index, start in enumerate(range(0, len(value), self.MAXIMUM_VOLUME_METADATA_VALUE_SEGMENT_LENGTH)):
+                        if len(value) > start + self.MAXIMUM_VOLUME_METADATA_VALUE_SEGMENT_LENGTH:
+                            self.volume_metadata.append({"key": "%s~%s" % (key, str(index)),
+                                                         "value": value[start:start + self.MAXIMUM_VOLUME_METADATA_VALUE_SEGMENT_LENGTH]})
+                        else:
+                            self.volume_metadata.append({"key": "%s~%s" % (key, str(index)), "value": value[start:len(value)]})
+                else:
+                    self.volume_metadata.append({"key": "%s~0" % key, "value": ""})

+        if self.state == "present" and self.thin_provision:
+            if not self.thin_volume_max_repo_size_b:
+                self.thin_volume_max_repo_size_b = self.size_b

+            if not self.thin_volume_expansion_policy:
+                self.thin_volume_expansion_policy = "automatic"

+            if self.size_b > 256 * 1024 ** 4:
+                self.module.fail_json(msg="Thin provisioned volumes must be less than or equal to 256tb in size."
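+        # Editor's worked example (hypothetical input) of the volume_metadata segmentation
+        # above: a 150-character value under key "note" is split into 60-character segments,
+        # each stored with a "~<index>" suffix so it can be reassembled from the metaTags:
+        #     {"note~0": value[0:60], "note~1": value[60:120], "note~2": value[120:150]}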
+                                      " Attempted size [%sg]" % (self.size_b / (1024 ** 3)))

+            if (self.thin_volume_repo_size_b and self.thin_volume_max_repo_size_b and
+                    self.thin_volume_repo_size_b > self.thin_volume_max_repo_size_b):
+                self.module.fail_json(msg="The initial size of the thin volume must not be larger than the maximum"
+                                          " repository size. Array [%s]." % self.ssid)

+            if self.thin_volume_growth_alert_threshold < 10 or self.thin_volume_growth_alert_threshold > 99:
+                self.module.fail_json(msg="thin_volume_growth_alert_threshold must be between 10 and 99, inclusive."
+                                          " thin_volume_growth_alert_threshold [%s]. Array [%s]."
+                                          % (self.thin_volume_growth_alert_threshold, self.ssid))

+        self.volume_detail = None
+        self.pool_detail = None
+        self.workload_id = None

+    def convert_to_aligned_bytes(self, size):
+        """Convert size to the truncated byte size that aligns on the segment size."""
+        size_bytes = int(size * self.SIZE_UNIT_MAP[self.size_unit])
+        segment_size_bytes = int(self.segment_size_kb * self.SIZE_UNIT_MAP["kb"])
+        segment_count = int(size_bytes / segment_size_bytes)
+        return segment_count * segment_size_bytes

+    def get_volume(self):
+        """Retrieve volume details from storage array."""
+        volumes = list()
+        thin_volumes = list()
+        try:
+            rc, volumes = self.request("storage-systems/%s/volumes" % self.ssid)
+        except Exception as err:
+            self.module.fail_json(msg="Failed to obtain list of thick volumes. Array Id [%s]. Error[%s]."
+                                      % (self.ssid, to_native(err)))
+        try:
+            rc, thin_volumes = self.request("storage-systems/%s/thin-volumes" % self.ssid)
+        except Exception as err:
+            self.module.fail_json(msg="Failed to obtain list of thin volumes. Array Id [%s]. Error[%s]."
+                                      % (self.ssid, to_native(err)))

+        volume_detail = [volume for volume in volumes + thin_volumes if volume["name"] == self.name]
+        return volume_detail[0] if volume_detail else dict()

+    def wait_for_volume_availability(self, retries=VOLUME_CREATION_BLOCKING_TIMEOUT_SEC / 5):
+        """Waits until volume becomes available.

+        :raises AnsibleFailJson when retries are exhausted.
+        """
+        if retries == 0:
+            self.module.fail_json(msg="Timed out waiting for the volume %s to become available. Array [%s]."
+                                      % (self.name, self.ssid))
+        if not self.get_volume():
+            time.sleep(5)
+            self.wait_for_volume_availability(retries=retries - 1)

+    def wait_for_volume_action(self, timeout=None):
+        """Waits until the volume action is complete.
+        :param int timeout: Wait duration measured in seconds. Waits indefinitely when None.
+        """
+        action = "unknown"
+        percent_complete = None
+        while action != "complete":
+            time.sleep(5)

+            try:
+                rc, operations = self.request("storage-systems/%s/symbol/getLongLivedOpsProgress" % self.ssid)

+                # Search long lived operations for volume
+                action = "complete"
+                for operation in operations["longLivedOpsProgress"]:
+                    if operation["volAction"] is not None:
+                        for key in operation.keys():
+                            if (operation[key] is not None and "volumeRef" in operation[key] and
+                                    (operation[key]["volumeRef"] == self.volume_detail["id"] or
+                                     ("storageVolumeRef" in self.volume_detail and operation[key]["volumeRef"] == self.volume_detail["storageVolumeRef"]))):
+                                action = operation["volAction"]
+                                percent_complete = operation["init"]["percentComplete"]
+            except Exception as err:
+                self.module.fail_json(msg="Failed to get volume expansion progress. Volume [%s]. Array Id [%s]."
+                                          " Error[%s]."
% (self.name, self.ssid, to_native(err))) + + if timeout is not None: + if timeout <= 0: + self.module.warn("Expansion action, %s, failed to complete during the allotted time. Time remaining" + " [%s]. Array Id [%s]." % (action, percent_complete, self.ssid)) + self.module.fail_json(msg="Expansion action failed to complete. Time remaining [%s]. Array Id [%s]." % (percent_complete, self.ssid)) + if timeout: + timeout -= 5 + + self.module.log("Expansion action, %s, is %s complete." % (action, percent_complete)) + self.module.log("Expansion action is complete.") + + def get_storage_pool(self): + """Retrieve storage pool details from the storage array.""" + storage_pools = list() + try: + rc, storage_pools = self.request("storage-systems/%s/storage-pools" % self.ssid) + except Exception as err: + self.module.fail_json(msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]." + % (self.ssid, to_native(err))) + + pool_detail = [storage_pool for storage_pool in storage_pools if storage_pool["name"] == self.storage_pool_name] + return pool_detail[0] if pool_detail else dict() + + def check_storage_pool_sufficiency(self): + """Perform a series of checks as to the sufficiency of the storage pool for the volume.""" + if not self.pool_detail: + self.module.fail_json(msg='Requested storage pool (%s) not found' % self.storage_pool_name) + + if not self.volume_detail: + if self.thin_provision and not self.pool_detail['diskPool']: + self.module.fail_json(msg='Thin provisioned volumes can only be created on raid disk pools.') + + if (self.data_assurance_enabled and not + (self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"] and + self.pool_detail["protectionInformationCapabilities"]["protectionType"] == "type2Protection")): + self.module.fail_json(msg="Data Assurance (DA) requires the storage pool to be DA-compatible." + " Array [%s]." % self.ssid) + + if int(self.pool_detail["freeSpace"]) < self.size_b and not self.thin_provision: + self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs." + " Array [%s]." % self.ssid) + else: + # Check for expansion + if (int(self.pool_detail["freeSpace"]) < int(self.volume_detail["totalSizeInBytes"]) - self.size_b and + not self.thin_provision): + self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs." + " Array [%s]." % self.ssid) + + def update_workload_tags(self, check_mode=False): + """Check the status of the workload tag and update storage array definitions if necessary. + + When the workload attributes are not provided but an existing workload tag name is, then the attributes will be + used. + + :return bool: Whether changes were required to be made.""" + change_required = False + workload_tags = None + request_body = None + ansible_profile_id = None + + if self.workload_name: + try: + rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve storage array workload tags. 
Array [%s]" % self.ssid) + + ansible_profile_id = "Other_1" + request_body = dict(name=self.workload_name, + profileId=ansible_profile_id, + workloadInstanceIndex=None, + isValid=True) + + # evaluate and update storage array when needed + for tag in workload_tags: + if tag["name"] == self.workload_name: + self.workload_id = tag["id"] + + if not self.metadata: + break + + # Determine if core attributes (everything but profileId) is the same + metadata_set = set(tuple(sorted(attr.items())) for attr in self.metadata) + tag_set = set(tuple(sorted(attr.items())) + for attr in tag["workloadAttributes"] if attr["key"] != "profileId") + if metadata_set != tag_set: + self.module.log("Workload tag change is required!") + change_required = True + + # only perform the required action when check_mode==False + if change_required and not check_mode: + self.metadata.append(dict(key="profileId", value=ansible_profile_id)) + request_body.update(dict(isNewWorkloadInstance=False, + isWorkloadDataInitialized=True, + isWorkloadCardDataToBeReset=True, + workloadAttributes=self.metadata)) + try: + rc, resp = self.request("storage-systems/%s/workloads/%s" % (self.ssid, tag["id"]), + data=request_body, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]" + % (self.ssid, to_native(error))) + self.module.log("Workload tag [%s] required change." % self.workload_name) + break + + # existing workload tag not found so create new workload tag + else: + change_required = True + self.module.log("Workload tag creation is required!") + + if change_required and not check_mode: + if self.metadata: + self.metadata.append(dict(key="profileId", value=ansible_profile_id)) + else: + self.metadata = [dict(key="profileId", value=ansible_profile_id)] + + request_body.update(dict(isNewWorkloadInstance=True, + isWorkloadDataInitialized=False, + isWorkloadCardDataToBeReset=False, + workloadAttributes=self.metadata)) + try: + rc, resp = self.request("storage-systems/%s/workloads" % self.ssid, + method="POST", data=request_body) + self.workload_id = resp["id"] + except Exception as error: + self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]" + % (self.ssid, to_native(error))) + self.module.log("Workload tag [%s] was added." % self.workload_name) + + return change_required + + def get_volume_property_changes(self): + """Retrieve the volume update request body when change(s) are required. + + :raise AnsibleFailJson when attempting to change segment size on existing volume. + :return dict: request body when change(s) to a volume's properties are required. + """ + change = False + request_body = dict(flashCache=self.ssd_cache_enabled, metaTags=[], + cacheSettings=dict(readCacheEnable=self.read_cache_enable, + writeCacheEnable=self.write_cache_enable, + mirrorEnable=self.write_cache_mirror_enable)) + + # check for invalid modifications + if self.segment_size_kb * 1024 != int(self.volume_detail["segmentSize"]): + self.module.fail_json(msg="Existing volume segment size is %s and cannot be modified." 
+ % self.volume_detail["segmentSize"]) + + # common thick/thin volume properties + if (self.read_cache_enable != self.volume_detail["cacheSettings"]["readCacheEnable"] or + self.write_cache_enable != self.volume_detail["cacheSettings"]["writeCacheEnable"] or + self.write_cache_mirror_enable != self.volume_detail["cacheSettings"]["mirrorEnable"] or + self.ssd_cache_enabled != self.volume_detail["flashCached"]): + change = True + + # controller ownership + if self.owning_controller_id and self.owning_controller_id != self.volume_detail["preferredManager"]: + change = True + request_body.update(dict(owningControllerId=self.owning_controller_id)) + + # volume meta tags + request_body["metaTags"].extend(self.volume_metadata) + for entry in self.volume_metadata: + if entry not in self.volume_detail["metadata"]: + change = True + + if self.workload_name: + request_body["metaTags"].extend([{"key": "workloadId", "value": self.workload_id}, + {"key": "volumeTypeId", "value": "volume"}]) + + if ({"key": "workloadId", "value": self.workload_id} not in self.volume_detail["metadata"] or + {"key": "volumeTypeId", "value": "volume"} not in self.volume_detail["metadata"]): + change = True + + if len(self.volume_detail["metadata"]) != len(request_body["metaTags"]): + change = True + + # thick/thin volume specific properties + if self.thin_provision: + if self.thin_volume_growth_alert_threshold != int(self.volume_detail["growthAlertThreshold"]): + change = True + request_body.update(dict(growthAlertThreshold=self.thin_volume_growth_alert_threshold)) + if self.thin_volume_expansion_policy != self.volume_detail["expansionPolicy"]: + change = True + request_body.update(dict(expansionPolicy=self.thin_volume_expansion_policy)) + else: + if self.read_ahead_enable != (int(self.volume_detail["cacheSettings"]["readAheadMultiplier"]) > 0): + change = True + request_body["cacheSettings"].update(dict(readAheadEnable=self.read_ahead_enable)) + if self.cache_without_batteries != self.volume_detail["cacheSettings"]["cwob"]: + change = True + request_body["cacheSettings"].update(dict(cacheWithoutBatteries=self.cache_without_batteries)) + + return request_body if change else dict() + + def get_expand_volume_changes(self): + """Expand the storage specifications for the existing thick/thin volume. + + :raise AnsibleFailJson when a thick/thin volume expansion request fails. + :return dict: dictionary containing all the necessary values for volume expansion request + """ + request_body = dict() + + if self.size_b < int(self.volume_detail["capacity"]) - self.size_tolerance_b: + self.module.fail_json(msg="Reducing the size of volumes is not permitted. Volume [%s]. 
Array [%s]" + % (self.name, self.ssid)) + + if self.volume_detail["thinProvisioned"]: + if self.size_b > int(self.volume_detail["capacity"]) + self.size_tolerance_b: + request_body.update(dict(sizeUnit="bytes", newVirtualSize=self.size_b)) + self.module.log("Thin volume virtual size have been expanded.") + + if self.volume_detail["expansionPolicy"] == "automatic": + if self.thin_volume_max_repo_size_b > int(self.volume_detail["provisionedCapacityQuota"]) + self.size_tolerance_b: + request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_max_repo_size_b)) + self.module.log("Thin volume maximum repository size have been expanded (automatic policy).") + + elif self.volume_detail["expansionPolicy"] == "manual": + if self.thin_volume_repo_size_b > int(self.volume_detail["currentProvisionedCapacity"]) + self.size_tolerance_b: + change = self.thin_volume_repo_size_b - int(self.volume_detail["currentProvisionedCapacity"]) + if change < 4 * 1024 ** 3 or change > 256 * 1024 ** 3 or change % (4 * 1024 ** 3) != 0: + self.module.fail_json(msg="The thin volume repository increase must be between or equal to 4gb" + " and 256gb in increments of 4gb. Attempted size [%sg]." + % (self.thin_volume_repo_size_b * 1024 ** 3)) + + request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_repo_size_b)) + self.module.log("Thin volume maximum repository size have been expanded (manual policy).") + + elif self.size_b > int(self.volume_detail["capacity"]) + self.size_tolerance_b: + request_body.update(dict(sizeUnit="bytes", expansionSize=self.size_b)) + self.module.log("Volume storage capacities have been expanded.") + + if request_body and not self.allow_expansion: + self.module.warn("Expansion not allowed! Change allow_expansion flag to true to allow volume expansions. Array Id [%s]." % self.ssid) + return dict() + + return request_body + + def create_volume(self): + """Create thick/thin volume according to the specified criteria.""" + body = dict(name=self.name, poolId=self.pool_detail["id"], sizeUnit="bytes", + dataAssuranceEnabled=self.data_assurance_enabled) + + if self.volume_metadata: + body.update({"metaTags": self.volume_metadata}) + + if self.thin_provision: + body.update(dict(virtualSize=self.size_b, + repositorySize=self.thin_volume_repo_size_b, + maximumRepositorySize=self.thin_volume_max_repo_size_b, + expansionPolicy=self.thin_volume_expansion_policy, + growthAlertThreshold=self.thin_volume_growth_alert_threshold)) + try: + rc, volume = self.request("storage-systems/%s/thin-volumes" % self.ssid, data=body, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]." + % (self.name, self.ssid, to_native(error))) + + self.module.log("New thin volume created [%s]." % self.name) + + else: + body.update(dict(size=self.size_b, segSize=self.segment_size_kb)) + try: + rc, volume = self.request("storage-systems/%s/volumes" % self.ssid, data=body, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to create volume. Volume [%s]. Array Id [%s]. Error[%s]." + % (self.name, self.ssid, to_native(error))) + + self.module.log("New volume created [%s]." % self.name) + + def update_volume_properties(self): + """Update existing thin-volume or volume properties. + + :raise AnsibleFailJson when either thick/thin volume update request fails. 
+ :return bool: whether update was applied + """ + self.wait_for_volume_availability() + self.volume_detail = self.get_volume() + + request_body = self.get_volume_property_changes() + + if request_body: + if self.thin_provision: + try: + rc, resp = self.request("storage-systems/%s/thin-volumes/%s" + % (self.ssid, self.volume_detail["id"]), data=request_body, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to update thin volume properties. Volume [%s]. Array Id [%s]." + " Error[%s]." % (self.name, self.ssid, to_native(error))) + else: + try: + rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]), + data=request_body, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to update volume properties. Volume [%s]. Array Id [%s]." + " Error[%s]." % (self.name, self.ssid, to_native(error))) + return True + return False + + def expand_volume(self): + """Expand the storage specifications for the existing thick/thin volume. + + :raise AnsibleFailJson when a thick/thin volume expansion request fails. + """ + request_body = self.get_expand_volume_changes() + if request_body: + if self.volume_detail["thinProvisioned"]: + try: + rc, resp = self.request("storage-systems/%s/thin-volumes/%s/expand" + % (self.ssid, self.volume_detail["id"]), data=request_body, method="POST") + except Exception as err: + self.module.fail_json(msg="Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s]." + % (self.name, self.ssid, to_native(err))) + self.module.log("Thin volume specifications have been expanded.") + + else: + try: + rc, resp = self.request( + "storage-systems/%s/volumes/%s/expand" % (self.ssid, self.volume_detail['id']), + data=request_body, method="POST") + except Exception as err: + self.module.fail_json(msg="Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s]." + % (self.name, self.ssid, to_native(err))) + + self.module.log("Volume storage capacities have been expanded.") + + def delete_volume(self): + """Delete existing thin/thick volume.""" + if self.thin_provision: + try: + rc, resp = self.request("storage-systems/%s/thin-volumes/%s" % (self.ssid, self.volume_detail["id"]), + method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to delete thin volume. Volume [%s]. Array Id [%s]. Error[%s]." + % (self.name, self.ssid, to_native(error))) + self.module.log("Thin volume deleted [%s]." % self.name) + else: + try: + rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]), + method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]." + % (self.name, self.ssid, to_native(error))) + self.module.log("Volume deleted [%s]." % self.name) + + def apply(self): + """Determine and apply any changes necessary to satisfy the specified criteria. 
+ + :raise AnsibleExitJson when the module completes successfully.""" + change = False + msg = None + + self.volume_detail = self.get_volume() + self.pool_detail = self.get_storage_pool() + + if self.pool_detail and self.size_unit == "pct": + space_mb = round(float(self.pool_detail["totalRaidedSpace"]), -8) / 1024 ** 2 - 100 + self.size_unit = "mb" + self.size_b = self.convert_to_aligned_bytes(space_mb * (self.size_percent / 100)) + + # Determine whether changes need to be applied to existing workload tags + if self.state == 'present' and self.update_workload_tags(check_mode=True): + change = True + + # Determine if any changes need to be applied + if self.volume_detail: + if self.state == 'absent': + change = True + + elif self.state == 'present': + # Check the property changes first; this confirms the segment size is unchanged before + # the size is used to determine whether a volume expansion is needed, which would + # otherwise surface an irrelevant error message. + if self.get_volume_property_changes() or self.get_expand_volume_changes(): + change = True + + elif self.state == 'present': + if self.thin_provision and (self.thin_volume_repo_size_b < 4 * 1024 ** 3 or + self.thin_volume_repo_size_b > 256 * 1024 ** 3 or + self.thin_volume_repo_size_b % (4 * 1024 ** 3) != 0): + self.module.fail_json(msg="The initial thin volume repository size must be between 4gb and 256gb in" + " increments of 4gb. Attempted size [%sg]." + % (self.thin_volume_repo_size_b / (1024 ** 3))) + change = True + + self.module.log("Update required: [%s]." % change) + + # Apply any necessary changes + if change and not self.module.check_mode: + if self.state == 'present': + if self.update_workload_tags(): + msg = "Workload tag change occurred." + + if not self.volume_detail: + self.check_storage_pool_sufficiency() + self.create_volume() + self.update_volume_properties() + msg = msg[:-1] + " and volume [%s] was created." if msg else "Volume [%s] has been created." + else: + if self.update_volume_properties(): + msg = "Volume [%s] properties were updated." + + if self.get_expand_volume_changes(): + self.expand_volume() + msg = msg[:-1] + " and was expanded." if msg else "Volume [%s] was expanded." + + if self.wait_for_initialization: + self.module.log("Waiting for volume operation to complete.") + self.wait_for_volume_action() + + elif self.state == 'absent': + self.delete_volume() + msg = "Volume [%s] has been deleted." + + else: + msg = "Volume [%s] does not exist." if self.state == 'absent' else "Volume [%s] exists."
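+ + # Illustrative sketch (comments only, not shipped logic): the "%s" placeholders above are + # deliberately left unformatted so the single exit point below can serve both templated and + # plain messages. Assuming self.name == "vol1": + # msg = "Volume [%s] was expanded." -> exit_json msg "Volume [vol1] was expanded." + # msg = "Workload tag change occurred." -> passed through unchanged (no "%s" present)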
+ + self.module.exit_json(msg=(msg % self.name if msg and "%s" in msg else msg), changed=change) + + +def main(): + volume = NetAppESeriesVolume() + volume.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_alerts.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_alerts.py new file mode 100644 index 000000000..20c4dc57e --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_alerts.py @@ -0,0 +1,286 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = """ +--- +module: netapp_e_alerts +short_description: NetApp E-Series manage email notification settings +description: + - Certain E-Series systems have the capability to send email notifications on potentially critical events. + - This module will allow the owner of the system to specify email recipients for these messages. +version_added: '2.7' +author: Michael Price (@lmprice) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.netapp.eseries +options: + state: + description: + - Enable/disable the sending of email-based alerts. + default: enabled + required: false + type: str + choices: + - enabled + - disabled + server: + description: + - A fully qualified domain name, IPv4 address, or IPv6 address of a mail server. + - To use a fully qualified domain name, you must configure a DNS server on both controllers using + M(netapp_e_mgmt_interface). + - Required when I(state=enabled). + type: str + required: no + sender: + description: + - This is the sender that the recipient will see. It doesn't necessarily need to be a valid email account. + - Required when I(state=enabled). + type: str + required: no + contact: + description: + - Allows the owner to specify some free-form contact information to be included in the emails. + - This is typically utilized to provide a contact phone number. + type: str + required: no + recipients: + description: + - The email addresses that will receive the email notifications. + - Required when I(state=enabled). + type: list + required: no + test: + description: + - When a change is detected in the configuration, a test email will be sent. + - This may take a few minutes to process. + - Only applicable if I(state=enabled). + default: no + type: bool + log_path: + description: + - Path to a file on the Ansible control node to be used for debug logging + type: str + required: no +notes: + - Check mode is supported. + - Alertable messages are a subset of messages shown by the Major Event Log (MEL), of the storage-system. Examples + of alertable messages include drive failures, failed controllers, loss of redundancy, and other warning/critical + events. + - This API is currently only supported with the Embedded Web Services API v2.0 and higher. 
+""" + +EXAMPLES = """ + - name: Enable email-based alerting + netapp_e_alerts: + state: enabled + sender: noreply@example.com + server: mail@example.com + contact: "Phone: 1-555-555-5555" + recipients: + - name1@example.com + - name2@example.com + api_url: "10.1.1.1:8443" + api_username: "admin" + api_password: "myPass" + + - name: Disable alerting + netapp_e_alerts: + state: disabled + api_url: "10.1.1.1:8443" + api_username: "admin" + api_password: "myPass" +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The settings have been updated. +""" + +import json +import logging +from pprint import pformat +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec +from ansible.module_utils._text import to_native + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +class Alerts(object): + def __init__(self): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + state=dict(type='str', required=False, default='enabled', + choices=['enabled', 'disabled']), + server=dict(type='str', required=False, ), + sender=dict(type='str', required=False, ), + contact=dict(type='str', required=False, ), + recipients=dict(type='list', required=False, ), + test=dict(type='bool', required=False, default=False, ), + log_path=dict(type='str', required=False), + )) + + required_if = [ + ['state', 'enabled', ['server', 'sender', 'recipients']] + ] + + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if) + args = self.module.params + self.alerts = args['state'] == 'enabled' + self.server = args['server'] + self.sender = args['sender'] + self.contact = args['contact'] + self.recipients = args['recipients'] + self.test = args['test'] + + self.ssid = args['ssid'] + self.url = args['api_url'] + self.creds = dict(url_password=args['api_password'], + validate_certs=args['validate_certs'], + url_username=args['api_username'], ) + + self.check_mode = self.module.check_mode + + log_path = args['log_path'] + + # logging setup + self._logger = logging.getLogger(self.__class__.__name__) + + if log_path: + logging.basicConfig( + level=logging.DEBUG, filename=log_path, filemode='w', + format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s') + + if not self.url.endswith('/'): + self.url += '/' + + # Very basic validation on email addresses: xx@yy.zz + email = re.compile(r"[^@]+@[^@]+\.[^@]+") + + if self.sender and not email.match(self.sender): + self.module.fail_json(msg="The sender (%s) provided is not a valid email address." % self.sender) + + if self.recipients is not None: + for recipient in self.recipients: + if not email.match(recipient): + self.module.fail_json(msg="The recipient (%s) provided is not a valid email address." % recipient) + + if len(self.recipients) < 1: + self.module.fail_json(msg="At least one recipient address must be specified.") + + def get_configuration(self): + try: + (rc, result) = request(self.url + 'storage-systems/%s/device-alerts' % self.ssid, headers=HEADERS, + **self.creds) + self._logger.info("Current config: %s", pformat(result)) + return result + + except Exception as err: + self.module.fail_json(msg="Failed to retrieve the alerts configuration! Array Id [%s]. Error [%s]." 
+ % (self.ssid, to_native(err))) + + def update_configuration(self): + config = self.get_configuration() + update = False + body = dict() + + if self.alerts: + body = dict(alertingEnabled=True) + if not config['alertingEnabled']: + update = True + + body.update(emailServerAddress=self.server) + if config['emailServerAddress'] != self.server: + update = True + + body.update(additionalContactInformation=self.contact, sendAdditionalContactInformation=True) + if self.contact and (self.contact != config['additionalContactInformation'] + or not config['sendAdditionalContactInformation']): + update = True + + body.update(emailSenderAddress=self.sender) + if config['emailSenderAddress'] != self.sender: + update = True + + self.recipients.sort() + if config['recipientEmailAddresses']: + config['recipientEmailAddresses'].sort() + + body.update(recipientEmailAddresses=self.recipients) + if config['recipientEmailAddresses'] != self.recipients: + update = True + + elif config['alertingEnabled']: + body = dict(alertingEnabled=False) + update = True + + self._logger.debug(pformat(body)) + + if update and not self.check_mode: + try: + (rc, result) = request(self.url + 'storage-systems/%s/device-alerts' % self.ssid, method='POST', + data=json.dumps(body), headers=HEADERS, **self.creds) + # This is going to catch cases like a connection failure + except Exception as err: + self.module.fail_json(msg="Failed to update the alerts configuration! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + return update + + def send_test_email(self): + """Send a test email to verify that the provided configuration is valid and functional.""" + if not self.check_mode: + try: + (rc, result) = request(self.url + 'storage-systems/%s/device-alerts/alert-email-test' % self.ssid, + timeout=300, method='POST', headers=HEADERS, **self.creds) + + if result['response'] != 'emailSentOK': + self.module.fail_json(msg="The test email failed with status=[%s]! Array Id [%s]." + % (result['response'], self.ssid)) + + # This is going to catch cases like a connection failure + except Exception as err: + self.module.fail_json(msg="Failed to send the test email! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + def update(self): + update = self.update_configuration() + + if self.test and update: + self._logger.info("An update was detected and test=True, running a test.") + self.send_test_email() + + if self.alerts: + msg = 'Alerting has been enabled using server=%s, sender=%s.' % (self.server, self.sender) + else: + msg = 'Alerting has been disabled.'
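+ + # Illustrative sketch (comments only; the field names mirror update_configuration above, + # the values are made-up examples): an enabled configuration posts a device-alerts body like + # {"alertingEnabled": True, + # "emailServerAddress": "mail.example.com", + # "emailSenderAddress": "noreply@example.com", + # "additionalContactInformation": "Phone: 1-555-555-5555", + # "sendAdditionalContactInformation": True, + # "recipientEmailAddresses": ["name1@example.com", "name2@example.com"]} + # while a disabled configuration posts only {"alertingEnabled": False}.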
+ + self.module.exit_json(msg=msg, changed=update, ) + + def __call__(self, *args, **kwargs): + self.update() + + +def main(): + alerts = Alerts() + alerts() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg.py new file mode 100644 index 000000000..e2bfa4193 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg.py @@ -0,0 +1,268 @@ +#!/usr/bin/python +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = """ +--- +module: netapp_e_amg +short_description: NetApp E-Series create, remove, and update asynchronous mirror groups +description: + - Allows for the creation, removal and updating of Asynchronous Mirror Groups for NetApp E-series storage arrays +version_added: '2.2' +author: Kevin Hulquest (@hulquest) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.netapp.eseries +options: + name: + description: + - The name of the async mirror group you wish to target, or create. + - If C(state) is present and the name isn't found, it will attempt to create. + type: str + required: yes + new_name: + description: + - New async mirror group name + type: str + required: no + secondaryArrayId: + description: + - The ID of the secondary array to be used in the mirroring process + type: str + required: yes + syncIntervalMinutes: + description: + - The synchronization interval in minutes + type: int + default: 10 + manualSync: + description: + - Setting this to true will cause other synchronization values to be ignored + type: bool + default: 'no' + recoveryWarnThresholdMinutes: + description: + - Recovery point warning threshold (minutes). The user will be warned when the age of the last good recovery point exceeds this value + type: int + default: 20 + repoUtilizationWarnThreshold: + description: + - Repository utilization warning threshold (percent) + type: int + default: 80 + interfaceType: + description: + - The intended protocol to use if both Fibre and iSCSI are available. + type: str + choices: + - iscsi + - fibre + syncWarnThresholdMinutes: + description: + - The threshold (in minutes) for notifying the user that periodic synchronization has taken too long to complete. + default: 10 + type: int + state: + description: + - A C(state) of present will either create or update the async mirror group. + - A C(state) of absent will remove the async mirror group.
+ type: str + choices: [ absent, present ] + required: yes +""" + +EXAMPLES = """ + - name: AMG removal + na_eseries_amg: + state: absent + ssid: "{{ ssid }}" + secondaryArrayId: "{{amg_secondaryArrayId}}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + new_name: "{{amg_array_name}}" + name: "{{amg_name}}" + when: amg_create + + - name: AMG create + netapp_e_amg: + state: present + ssid: "{{ ssid }}" + secondaryArrayId: "{{amg_secondaryArrayId}}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + new_name: "{{amg_array_name}}" + name: "{{amg_name}}" + when: amg_create +""" + +RETURN = """ +msg: + description: Successful creation + returned: success + type: str + sample: '{"changed": true, "connectionType": "fc", "groupRef": "3700000060080E5000299C24000006E857AC7EEC", "groupState": "optimal", "id": "3700000060080E5000299C24000006E857AC7EEC", "label": "amg_made_by_ansible", "localRole": "primary", "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", "orphanGroup": false, "recoveryPointAgeAlertThresholdMinutes": 20, "remoteRole": "secondary", "remoteTarget": {"nodeName": {"ioInterfaceType": "fc", "iscsiNodeName": null, "remoteNodeWWN": "20040080E5299F1C"}, "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", "scsiinitiatorTargetBaseProperties": {"ioInterfaceType": "fc", "iscsiinitiatorTargetBaseParameters": null}}, "remoteTargetId": "ansible2", "remoteTargetName": "Ansible2", "remoteTargetWwn": "60080E5000299F880000000056A25D56", "repositoryUtilizationWarnThreshold": 80, "roleChangeProgress": "none", "syncActivity": "idle", "syncCompletionTimeAlertThresholdMinutes": 10, "syncIntervalMinutes": 10, "worldWideName": "60080E5000299C24000006E857AC7EEC"}' +""" # NOQA + +import json +import traceback + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec + + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +def has_match(module, ssid, api_url, api_pwd, api_usr, body): + compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes', + 'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold'] + desired_state = dict((x, (body.get(x))) for x in compare_keys) + label_exists = False + matches_spec = False + current_state = None + async_id = None + api_data = None + desired_name = body.get('name') + endpoint = 'storage-systems/%s/async-mirrors' % ssid + url = api_url + endpoint + try: + rc, data = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS) + except Exception as e: + module.exit_json(msg="Error finding a match. 
Message: %s" % to_native(e), exception=traceback.format_exc()) + + for async_group in data: + if async_group['label'] == desired_name: + label_exists = True + api_data = async_group + async_id = async_group['groupRef'] + current_state = dict( + syncIntervalMinutes=async_group['syncIntervalMinutes'], + syncWarnThresholdMinutes=async_group['syncCompletionTimeAlertThresholdMinutes'], + recoveryWarnThresholdMinutes=async_group['recoveryPointAgeAlertThresholdMinutes'], + repoUtilizationWarnThreshold=async_group['repositoryUtilizationWarnThreshold'], + ) + + if current_state == desired_state: + matches_spec = True + + return label_exists, matches_spec, api_data, async_id + + +def create_async(module, ssid, api_url, api_pwd, api_usr, body): + endpoint = 'storage-systems/%s/async-mirrors' % ssid + url = api_url + endpoint + post_data = json.dumps(body) + try: + rc, data = request(url, data=post_data, method='POST', url_username=api_usr, url_password=api_pwd, + headers=HEADERS) + except Exception as e: + module.exit_json(msg="Exception while creating aysnc mirror group. Message: %s" % to_native(e), + exception=traceback.format_exc()) + return data + + +def update_async(module, ssid, api_url, pwd, user, body, new_name, async_id): + endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id) + url = api_url + endpoint + compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes', + 'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold'] + desired_state = dict((x, (body.get(x))) for x in compare_keys) + + if new_name: + desired_state['new_name'] = new_name + + post_data = json.dumps(desired_state) + + try: + rc, data = request(url, data=post_data, method='POST', headers=HEADERS, + url_username=user, url_password=pwd) + except Exception as e: + module.exit_json(msg="Exception while updating async mirror group. Message: %s" % to_native(e), + exception=traceback.format_exc()) + + return data + + +def remove_amg(module, ssid, api_url, pwd, user, async_id): + endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id) + url = api_url + endpoint + try: + rc, data = request(url, method='DELETE', url_username=user, url_password=pwd, + headers=HEADERS) + except Exception as e: + module.exit_json(msg="Exception while removing async mirror group. 
Message: %s" % to_native(e), + exception=traceback.format_exc()) + + return + + +def main(): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + name=dict(required=True, type='str'), + new_name=dict(required=False, type='str'), + secondaryArrayId=dict(required=True, type='str'), + syncIntervalMinutes=dict(required=False, default=10, type='int'), + manualSync=dict(required=False, default=False, type='bool'), + recoveryWarnThresholdMinutes=dict(required=False, default=20, type='int'), + repoUtilizationWarnThreshold=dict(required=False, default=80, type='int'), + interfaceType=dict(required=False, choices=['fibre', 'iscsi'], type='str'), + state=dict(required=True, choices=['present', 'absent']), + syncWarnThresholdMinutes=dict(required=False, default=10, type='int') + )) + + module = AnsibleModule(argument_spec=argument_spec) + + p = module.params + + ssid = p.pop('ssid') + api_url = p.pop('api_url') + user = p.pop('api_username') + pwd = p.pop('api_password') + new_name = p.pop('new_name') + state = p.pop('state') + + if not api_url.endswith('/'): + api_url += '/' + + name_exists, spec_matches, api_data, async_id = has_match(module, ssid, api_url, pwd, user, p) + + if state == 'present': + if name_exists and spec_matches: + module.exit_json(changed=False, msg="Desired state met", **api_data) + elif name_exists and not spec_matches: + results = update_async(module, ssid, api_url, pwd, user, + p, new_name, async_id) + module.exit_json(changed=True, + msg="Async mirror group updated", async_id=async_id, + **results) + elif not name_exists: + results = create_async(module, ssid, api_url, user, pwd, p) + module.exit_json(changed=True, **results) + + elif state == 'absent': + if name_exists: + remove_amg(module, ssid, api_url, pwd, user, async_id) + module.exit_json(changed=True, msg="Async mirror group removed.", + async_id=async_id) + else: + module.exit_json(changed=False, + msg="Async Mirror group: %s already absent" % p['name']) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_role.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_role.py new file mode 100644 index 000000000..a67506f3f --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_role.py @@ -0,0 +1,244 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = """ +--- +module: netapp_e_amg_role +short_description: NetApp E-Series update the role of a storage array within an Asynchronous Mirror Group (AMG). +description: + - Update a storage array to become the primary or secondary instance in an asynchronous mirror group +version_added: '2.2' +author: Kevin Hulquest (@hulquest) +options: + api_username: + required: true + type: str + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + type: str + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + type: str + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. 
+ validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + type: bool + ssid: + description: + - The ID of the primary storage array for the async mirror action + required: yes + type: str + name: + description: + - Name of the role + required: yes + type: str + role: + description: + - Whether the array should be the primary or secondary array for the AMG + required: yes + type: str + choices: ['primary', 'secondary'] + noSync: + description: + - Whether to avoid synchronization prior to role reversal + required: no + default: no + type: bool + force: + description: + - Whether to force the role reversal regardless of the online-state of the primary + required: no + default: no + type: bool +""" + +EXAMPLES = """ + - name: Update the role of a storage array + netapp_e_amg_role: + name: updating amg role + role: primary + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" +""" + +RETURN = """ +msg: + description: Failure message + returned: failure + type: str + sample: "No Async Mirror Group with the name." +""" +import json +import traceback + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import HTTPError +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import open_url + + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError as e: + r = e.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except Exception: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +def has_match(module, ssid, api_url, api_pwd, api_usr, body, name): + amg_exists = False + has_desired_role = False + amg_id = None + amg_data = None + get_amgs = 'storage-systems/%s/async-mirrors' % ssid + url = api_url + get_amgs + try: + amg_rc, amgs = request(url, url_username=api_usr, url_password=api_pwd, + headers=HEADERS) + except Exception: + module.fail_json(msg="Failed to find AMGs on storage array. 
Id [%s]" % (ssid)) + + for amg in amgs: + if amg['label'] == name: + amg_exists = True + amg_id = amg['id'] + amg_data = amg + if amg['localRole'] == body.get('role'): + has_desired_role = True + + return amg_exists, has_desired_role, amg_id, amg_data + + +def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id): + endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id) + url = api_url + endpoint + post_data = json.dumps(body) + try: + request(url, data=post_data, method='POST', url_username=api_usr, + url_password=api_pwd, headers=HEADERS) + except Exception as e: + module.fail_json( + msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)), + exception=traceback.format_exc()) + + status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id) + status_url = api_url + status_endpoint + try: + rc, status = request(status_url, method='GET', url_username=api_usr, + url_password=api_pwd, headers=HEADERS) + except Exception as e: + module.fail_json( + msg="Failed to check status of AMG after role reversal. " + "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)), + exception=traceback.format_exc()) + + # Here we wait for the role reversal to complete + if 'roleChangeProgress' in status: + while status['roleChangeProgress'] != "none": + try: + rc, status = request(status_url, method='GET', + url_username=api_usr, url_password=api_pwd, headers=HEADERS) + except Exception as e: + module.fail_json( + msg="Failed to check status of AMG after role reversal. " + "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, to_native(e)), + exception=traceback.format_exc()) + return status + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + name=dict(required=True, type='str'), + role=dict(required=True, choices=['primary', 'secondary']), + noSync=dict(required=False, type='bool', default=False), + force=dict(required=False, type='bool', default=False), + ssid=dict(required=True, type='str'), + api_url=dict(required=True), + api_username=dict(required=False), + api_password=dict(required=False, no_log=True), + )) + + module = AnsibleModule(argument_spec=argument_spec) + + p = module.params + + ssid = p.pop('ssid') + api_url = p.pop('api_url') + user = p.pop('api_username') + pwd = p.pop('api_password') + name = p.pop('name') + + if not api_url.endswith('/'): + api_url += '/' + + agm_exists, has_desired_role, async_id, amg_data = has_match(module, ssid, api_url, pwd, user, p, name) + + if not agm_exists: + module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name) + elif has_desired_role: + module.exit_json(changed=False, **amg_data) + + else: + amg_data = update_amg(module, ssid, api_url, user, pwd, p, async_id) + if amg_data: + module.exit_json(changed=True, **amg_data) + else: + module.exit_json(changed=True, msg="AMG role changed.") + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_sync.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_sync.py new file mode 100644 index 000000000..056accd6b --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_amg_sync.py @@ -0,0 +1,267 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + 
+ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = """ +--- +module: netapp_e_amg_sync +short_description: NetApp E-Series conduct synchronization actions on asynchronous mirror groups. +description: + - Allows for the initialization, suspension and resumption of an asynchronous mirror group's synchronization for NetApp E-series storage arrays. +version_added: '2.2' +author: Kevin Hulquest (@hulquest) +options: + api_username: + required: true + type: str + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + type: str + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + type: str + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + type: bool + ssid: + description: + - The ID of the storage array containing the AMG you wish to target + type: str + name: + description: + - The name of the async mirror group you wish to target + type: str + required: yes + state: + description: + - The synchronization action you'd like to take. + - If C(running) then it will begin syncing if there is no active sync or will resume a suspended sync. If there is already a sync in + progress, it will return with an OK status. + - If C(suspended) it will suspend any ongoing sync action, but return OK if there is no active sync or if the sync is already suspended. + type: str + choices: + - running + - suspended + required: yes + delete_recovery_point: + description: + - Indicates whether the recovery point can be deleted on the secondary if necessary to achieve the synchronization. + - If true, and if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary for any member volume, the last + recovery point will be deleted and synchronization will continue. + - If false, the synchronization will be suspended if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary + and the recovery point will be preserved. + - "NOTE: This only affects newly launched syncs." + type: bool + default: no +""" +EXAMPLES = """ + - name: start AMG async + netapp_e_amg_sync: + name: "{{ amg_sync_name }}" + state: running + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" +""" +RETURN = """ +json: + description: The object attributes of the AMG.
+ returned: success + type: str + example: + { + "changed": false, + "connectionType": "fc", + "groupRef": "3700000060080E5000299C24000006EF57ACAC70", + "groupState": "optimal", + "id": "3700000060080E5000299C24000006EF57ACAC70", + "label": "made_with_ansible", + "localRole": "primary", + "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", + "orphanGroup": false, + "recoveryPointAgeAlertThresholdMinutes": 20, + "remoteRole": "secondary", + "remoteTarget": { + "nodeName": { + "ioInterfaceType": "fc", + "iscsiNodeName": null, + "remoteNodeWWN": "20040080E5299F1C" + }, + "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", + "scsiinitiatorTargetBaseProperties": { + "ioInterfaceType": "fc", + "iscsiinitiatorTargetBaseParameters": null + } + }, + "remoteTargetId": "ansible2", + "remoteTargetName": "Ansible2", + "remoteTargetWwn": "60080E5000299F880000000056A25D56", + "repositoryUtilizationWarnThreshold": 80, + "roleChangeProgress": "none", + "syncActivity": "idle", + "syncCompletionTimeAlertThresholdMinutes": 10, + "syncIntervalMinutes": 10, + "worldWideName": "60080E5000299C24000006EF57ACAC70" + } +""" +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import HTTPError +from ansible.module_utils.urls import open_url + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError as e: + r = e.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except Exception: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +class AMGsync(object): + def __init__(self): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_username=dict(type='str', required=True), + api_password=dict(type='str', required=True, no_log=True), + api_url=dict(type='str', required=True), + name=dict(required=True, type='str'), + ssid=dict(required=True, type='str'), + state=dict(required=True, type='str', choices=['running', 'suspended']), + delete_recovery_point=dict(required=False, type='bool', default=False) + )) + self.module = AnsibleModule(argument_spec=argument_spec) + args = self.module.params + self.name = args['name'] + self.ssid = args['ssid'] + self.state = args['state'] + self.delete_recovery_point = args['delete_recovery_point'] + try: + self.user = args['api_username'] + self.pwd = args['api_password'] + self.url = args['api_url'] + except KeyError: + self.module.fail_json(msg="You must pass in api_username" + "and api_password and api_url to the module.") + self.certs = args['validate_certs'] + + self.post_headers = { + "Accept": "application/json", + "Content-Type": "application/json" + } + self.amg_id, self.amg_obj = self.get_amg() + + def get_amg(self): + endpoint = self.url + '/storage-systems/%s/async-mirrors' % self.ssid + 
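# Note (comments only): the AMG is resolved client-side by matching the user-supplied name + # against each group's "label" in the listing response, e.g. + # [{"id": "3700000060080E5000299C24000006EF57ACAC70", "label": "made_with_ansible", ...}, ...] + # with the first matching entry supplying the group id used by the action endpoints below. +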
(rc, amg_objs) = request(endpoint, url_username=self.user, url_password=self.pwd, validate_certs=self.certs, + headers=self.post_headers) + matching_amgs = [amg for amg in amg_objs if amg['label'] == self.name] + try: + amg_obj = matching_amgs[0] + amg_id = amg_obj['id'] + except IndexError: + self.module.fail_json( + msg="There is no async mirror group %s associated with storage array %s" % (self.name, self.ssid)) + return amg_id, amg_obj + + @property + def current_state(self): + amg_id, amg_obj = self.get_amg() + return amg_obj['syncActivity'] + + def run_sync_action(self): + # If we get to this point we know that the states differ, and there is no 'err' state, + # so no need to revalidate + + post_body = dict() + if self.state == 'running': + if self.current_state == 'idle': + if self.delete_recovery_point: + post_body.update(dict(deleteRecoveryPointIfNecessary=self.delete_recovery_point)) + suffix = 'sync' + else: + # In a suspended state + suffix = 'resume' + else: + suffix = 'suspend' + + endpoint = self.url + "/storage-systems/%s/async-mirrors/%s/%s" % (self.ssid, self.amg_id, suffix) + + (rc, resp) = request(endpoint, method='POST', url_username=self.user, url_password=self.pwd, + validate_certs=self.certs, data=json.dumps(post_body), headers=self.post_headers, + ignore_errors=True) + + if not str(rc).startswith('2'): + self.module.fail_json(msg=str(resp['errorMessage'])) + + return resp + + def apply(self): + state_map = dict( + running=['active'], + suspended=['userSuspended', 'internallySuspended', 'paused'], + err=['unkown', '_UNDEFINED']) + + changed = False + if self.current_state not in state_map[self.state]: + if self.current_state in state_map['err']: + self.module.fail_json( + msg="The sync is in a state of '%s'; this requires manual intervention. Please investigate and try again." % self.current_state) + else: + self.amg_obj = self.run_sync_action() + changed = True + + (ret, amg) = self.get_amg() + self.module.exit_json(changed=changed, **amg) + + +def main(): + sync = AMGsync() + sync.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_asup.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_asup.py new file mode 100644 index 000000000..f039626af --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_asup.py @@ -0,0 +1,314 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = """ +--- +module: netapp_e_asup +short_description: NetApp E-Series manage auto-support settings +description: + - Allow the auto-support settings to be configured for an individual E-Series storage-system +version_added: '2.7' +author: Michael Price (@lmprice) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.netapp.eseries +options: + state: + description: + - Enable/disable the E-Series auto-support configuration. + - When this option is enabled, configuration, logs, and other support-related information will be relayed + to NetApp to help better support your system. No personally identifiable information, passwords, etc., will + be collected.
+ default: enabled + type: str + choices: + - enabled + - disabled + aliases: + - asup + - auto_support + - autosupport + active: + description: + - Enable active/proactive monitoring for ASUP. When a problem is detected by our monitoring systems, it's + possible that the bundle did not contain all of the required information at the time of the event. + Enabling this option allows NetApp support personnel to manually request transmission or re-transmission + of support data in order ot resolve the problem. + - Only applicable if I(state=enabled). + default: yes + type: bool + start: + description: + - A start hour may be specified in a range from 0 to 23 hours. + - ASUP bundles will be sent daily between the provided start and end time (UTC). + - I(start) must be less than I(end). + aliases: + - start_time + default: 0 + type: int + end: + description: + - An end hour may be specified in a range from 1 to 24 hours. + - ASUP bundles will be sent daily between the provided start and end time (UTC). + - I(start) must be less than I(end). + aliases: + - end_time + default: 24 + type: int + days: + description: + - A list of days of the week that ASUP bundles will be sent. A larger, weekly bundle will be sent on one + of the provided days. + choices: + - monday + - tuesday + - wednesday + - thursday + - friday + - saturday + - sunday + required: no + type: list + aliases: + - days_of_week + - schedule_days + verbose: + description: + - Provide the full ASUP configuration in the return. + default: no + required: no + type: bool + log_path: + description: + - A local path to a file to be used for debug logging + type: str + required: no +notes: + - Check mode is supported. + - Enabling ASUP will allow our support teams to monitor the logs of the storage-system in order to proactively + respond to issues with the system. It is recommended that all ASUP-related options be enabled, but they may be + disabled if desired. + - This API is currently only supported with the Embedded Web Services API v2.0 and higher. +""" + +EXAMPLES = """ + - name: Enable ASUP and allow pro-active retrieval of bundles + netapp_e_asup: + state: enabled + active: yes + api_url: "10.1.1.1:8443" + api_username: "admin" + api_password: "myPass" + + - name: Set the ASUP schedule to only send bundles from 12 AM CST to 3 AM CST. + netapp_e_asup: + start: 17 + end: 20 + api_url: "10.1.1.1:8443" + api_username: "admin" + api_password: "myPass" +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The settings have been updated. +asup: + description: + - True if ASUP is enabled. + returned: on success + sample: True + type: bool +active: + description: + - True if the active option has been enabled. + returned: on success + sample: True + type: bool +cfg: + description: + - Provide the full ASUP configuration. + returned: on success when I(verbose=true). + type: complex + contains: + asupEnabled: + description: + - True if ASUP has been enabled. + type: bool + onDemandEnabled: + description: + - True if ASUP active monitoring has been enabled. + type: bool + daysOfWeek: + description: + - The days of the week that ASUP bundles will be sent. 
+ type: list +""" + +import json +import logging +from pprint import pformat + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec +from ansible.module_utils._text import to_native + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +class Asup(object): + DAYS_OPTIONS = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday'] + + def __init__(self): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + state=dict(type='str', required=False, default='enabled', aliases=['asup', 'auto_support', 'autosupport'], + choices=['enabled', 'disabled']), + active=dict(type='bool', required=False, default=True, ), + days=dict(type='list', required=False, aliases=['schedule_days', 'days_of_week'], + choices=self.DAYS_OPTIONS), + start=dict(type='int', required=False, default=0, aliases=['start_time']), + end=dict(type='int', required=False, default=24, aliases=['end_time']), + verbose=dict(type='bool', required=False, default=False), + log_path=dict(type='str', required=False), + )) + + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, ) + args = self.module.params + self.asup = args['state'] == 'enabled' + self.active = args['active'] + self.days = args['days'] + self.start = args['start'] + self.end = args['end'] + self.verbose = args['verbose'] + + self.ssid = args['ssid'] + self.url = args['api_url'] + self.creds = dict(url_password=args['api_password'], + validate_certs=args['validate_certs'], + url_username=args['api_username'], ) + + self.check_mode = self.module.check_mode + + log_path = args['log_path'] + + # logging setup + self._logger = logging.getLogger(self.__class__.__name__) + + if log_path: + logging.basicConfig( + level=logging.DEBUG, filename=log_path, filemode='w', + format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s') + + if not self.url.endswith('/'): + self.url += '/' + + if self.start >= self.end: + self.module.fail_json(msg="The value provided for the start time is invalid." + " It must be less than the end time.") + if self.start < 0 or self.start > 23: + self.module.fail_json(msg="The value provided for the start time is invalid. It must be between 0 and 23.") + else: + self.start = self.start * 60 + if self.end < 1 or self.end > 24: + self.module.fail_json(msg="The value provided for the end time is invalid. It must be between 1 and 24.") + else: + self.end = min(self.end * 60, 1439) + + if not self.days: + self.days = self.DAYS_OPTIONS + + def get_configuration(self): + try: + (rc, result) = request(self.url + 'device-asup', headers=HEADERS, **self.creds) + + if not (result['asupCapable'] and result['onDemandCapable']): + self.module.fail_json(msg="ASUP is not supported on this device. Array Id [%s]." % (self.ssid)) + return result + + except Exception as err: + self.module.fail_json(msg="Failed to retrieve ASUP configuration! Array Id [%s]. Error [%s]." 
+ % (self.ssid, to_native(err))) + + def update_configuration(self): + config = self.get_configuration() + update = False + body = dict() + + if self.asup: + body = dict(asupEnabled=True) + if not config['asupEnabled']: + update = True + + if (config['onDemandEnabled'] and config['remoteDiagsEnabled']) != self.active: + update = True + body.update(dict(onDemandEnabled=self.active, + remoteDiagsEnabled=self.active)) + self.days.sort() + config['schedule']['daysOfWeek'].sort() + + body['schedule'] = dict(daysOfWeek=self.days, + dailyMinTime=self.start, + dailyMaxTime=self.end, + weeklyMinTime=self.start, + weeklyMaxTime=self.end) + + if self.days != config['schedule']['daysOfWeek']: + update = True + if self.start != config['schedule']['dailyMinTime'] or self.start != config['schedule']['weeklyMinTime']: + update = True + elif self.end != config['schedule']['dailyMaxTime'] or self.end != config['schedule']['weeklyMaxTime']: + update = True + + elif config['asupEnabled']: + body = dict(asupEnabled=False) + update = True + + self._logger.info(pformat(body)) + + if update and not self.check_mode: + try: + (rc, result) = request(self.url + 'device-asup', method='POST', + data=json.dumps(body), headers=HEADERS, **self.creds) + # This is going to catch cases like a connection failure + except Exception as err: + self.module.fail_json(msg="Failed to update the ASUP settings! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + return update + + def update(self): + update = self.update_configuration() + cfg = self.get_configuration() + if self.verbose: + self.module.exit_json(msg="The ASUP settings have been updated.", changed=update, + asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'], cfg=cfg) + else: + self.module.exit_json(msg="The ASUP settings have been updated.", changed=update, + asup=cfg['asupEnabled'], active=cfg['onDemandEnabled']) + + def __call__(self, *args, **kwargs): + self.update() + + +def main(): + settings = Asup() + settings() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auditlog.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auditlog.py new file mode 100644 index 000000000..814a72d34 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auditlog.py @@ -0,0 +1,286 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = """ +--- +module: netapp_e_auditlog +short_description: NetApp E-Series manage audit-log configuration +description: + - This module allows an E-Series storage system owner to set audit-log configuration parameters. +version_added: '2.7' +author: Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.netapp.eseries +options: + max_records: + description: + - The maximum number of log messages audit-log will retain. + - Max records must be between 100 and 50000, inclusive. + default: 50000 + type: int + log_level: + description: Filters the log messages according to the specified log level selection.
+ choices: + - all + - writeOnly + default: writeOnly + type: str + full_policy: + description: Specifies what audit-log should do once the number of entries approaches the record limit. + choices: + - overWrite + - preventSystemAccess + default: overWrite + type: str + threshold: + description: + - This is the memory-full percent threshold at which audit-log will start issuing warning messages. + - Percent range must be between 60 and 90, inclusive. + default: 90 + type: int + force: + description: + - Forces the audit-log configuration to delete log history when log-message fullness causes an immediate + warning or full condition. + - Warning! This will cause any existing audit-log messages to be deleted. + - This is only applicable for I(full_policy=preventSystemAccess). + type: bool + default: no + log_path: + description: A local path to a file to be used for debug logging. + required: no + type: str +notes: + - Check mode is supported. + - This module is currently only supported with the Embedded Web Services API v3.0 and higher. +""" + +EXAMPLES = """ +- name: Define audit-log to prevent system access if records exceed 50000 with warnings occurring at 60% capacity. + netapp_e_auditlog: + api_url: "https://{{ netapp_e_api_host }}/devmgr/v2" + api_username: "{{ netapp_e_api_username }}" + api_password: "{{ netapp_e_api_password }}" + ssid: "{{ netapp_e_ssid }}" + validate_certs: no + max_records: 50000 + log_level: all + full_policy: preventSystemAccess + threshold: 60 + log_path: /path/to/log_file.log +- name: Define audit-log to utilize the default values. + netapp_e_auditlog: + api_url: "https://{{ netapp_e_api_host }}/devmgr/v2" + api_username: "{{ netapp_e_api_username }}" + api_password: "{{ netapp_e_api_password }}" + ssid: "{{ netapp_e_ssid }}" +- name: Force audit-log configuration when full or warning conditions occur while enacting preventSystemAccess policy. + netapp_e_auditlog: + api_url: "https://{{ netapp_e_api_host }}/devmgr/v2" + api_username: "{{ netapp_e_api_username }}" + api_password: "{{ netapp_e_api_password }}" + ssid: "{{ netapp_e_ssid }}" + max_records: 5000 + log_level: all + full_policy: preventSystemAccess + threshold: 60 + force: yes +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The settings have been updated.
+""" + +import json +import logging +from pprint import pformat + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec +from ansible.module_utils._text import to_native + +try: + from urlparse import urlparse, urlunparse +except Exception: + from urllib.parse import urlparse, urlunparse + + +class AuditLog(object): + """Audit-log module configuration class.""" + MAX_RECORDS = 50000 + HEADERS = {"Content-Type": "application/json", + "Accept": "application/json"} + + def __init__(self): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + max_records=dict(type="int", default=50000), + log_level=dict(type="str", default="writeOnly", choices=["all", "writeOnly"]), + full_policy=dict(type="str", default="overWrite", choices=["overWrite", "preventSystemAccess"]), + threshold=dict(type="int", default=90), + force=dict(type="bool", default=False), + log_path=dict(type='str', required=False))) + + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + args = self.module.params + + self.max_records = args["max_records"] + if self.max_records < 100 or self.max_records > self.MAX_RECORDS: + self.module.fail_json(msg="Audit-log max_records count must be between 100 and 50000: [%s]" + % self.max_records) + self.threshold = args["threshold"] + if self.threshold < 60 or self.threshold > 90: + self.module.fail_json(msg="Audit-log percent threshold must be between 60 and 90: [%s]" % self.threshold) + self.log_level = args["log_level"] + self.full_policy = args["full_policy"] + self.force = args["force"] + self.ssid = args['ssid'] + self.url = args['api_url'] + if not self.url.endswith('/'): + self.url += '/' + self.creds = dict(url_password=args['api_password'], + validate_certs=args['validate_certs'], + url_username=args['api_username'], ) + + # logging setup + log_path = args['log_path'] + self._logger = logging.getLogger(self.__class__.__name__) + + if log_path: + logging.basicConfig( + level=logging.DEBUG, filename=log_path, filemode='w', + format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s') + + self.proxy_used = self.is_proxy() + self._logger.info(self.proxy_used) + self.check_mode = self.module.check_mode + + def is_proxy(self): + """Determine whether the API is embedded or proxy.""" + try: + + # replace http url path with devmgr/utils/about + about_url = list(urlparse(self.url)) + about_url[2] = "devmgr/utils/about" + about_url = urlunparse(about_url) + + rc, data = request(about_url, timeout=300, headers=self.HEADERS, **self.creds) + + return data["runningAsProxy"] + except Exception as err: + self.module.fail_json(msg="Failed to retrieve the webservices about information! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + def get_configuration(self): + """Retrieve the existing audit-log configurations. + + :returns: dictionary containing current audit-log configuration + """ + try: + if self.proxy_used: + rc, data = request(self.url + "audit-log/config", timeout=300, headers=self.HEADERS, **self.creds) + else: + rc, data = request(self.url + "storage-systems/%s/audit-log/config" % self.ssid, + timeout=300, headers=self.HEADERS, **self.creds) + return data + except Exception as err: + self.module.fail_json(msg="Failed to retrieve the audit-log configuration! " + "Array Id [%s]. Error [%s]." 
+
+    def build_configuration(self):
+        """Build audit-log expected configuration.
+
+        :returns: Tuple containing update boolean value and dictionary of audit-log configuration
+        """
+        config = self.get_configuration()
+
+        current = dict(auditLogMaxRecords=config["auditLogMaxRecords"],
+                       auditLogLevel=config["auditLogLevel"],
+                       auditLogFullPolicy=config["auditLogFullPolicy"],
+                       auditLogWarningThresholdPct=config["auditLogWarningThresholdPct"])
+
+        body = dict(auditLogMaxRecords=self.max_records,
+                    auditLogLevel=self.log_level,
+                    auditLogFullPolicy=self.full_policy,
+                    auditLogWarningThresholdPct=self.threshold)
+
+        update = current != body
+
+        self._logger.info(pformat(update))
+        self._logger.info(pformat(body))
+        return update, body
+
+    def delete_log_messages(self):
+        """Delete all audit-log messages."""
+        self._logger.info("Deleting audit-log messages...")
+        try:
+            if self.proxy_used:
+                rc, result = request(self.url + "audit-log?clearAll=True", timeout=300,
+                                     method="DELETE", headers=self.HEADERS, **self.creds)
+            else:
+                rc, result = request(self.url + "storage-systems/%s/audit-log?clearAll=True" % self.ssid, timeout=300,
+                                     method="DELETE", headers=self.HEADERS, **self.creds)
+        except Exception as err:
+            self.module.fail_json(msg="Failed to delete audit-log messages! Array Id [%s]. Error [%s]."
+                                      % (self.ssid, to_native(err)))
+
+    def update_configuration(self, update=None, body=None, attempt_recovery=True):
+        """Update audit-log configuration."""
+        if update is None or body is None:
+            update, body = self.build_configuration()
+
+        if update and not self.check_mode:
+            try:
+                if self.proxy_used:
+                    rc, result = request(self.url + "storage-systems/audit-log/config", timeout=300,
+                                         data=json.dumps(body), method='POST', headers=self.HEADERS,
+                                         ignore_errors=True, **self.creds)
+                else:
+                    rc, result = request(self.url + "storage-systems/%s/audit-log/config" % self.ssid, timeout=300,
+                                         data=json.dumps(body), method='POST', headers=self.HEADERS,
+                                         ignore_errors=True, **self.creds)
+
+                if rc == 422:
+                    if self.force and attempt_recovery:
+                        self.delete_log_messages()
+                        update = self.update_configuration(update, body, False)
+                    else:
+                        self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Status [%s]. Error [%s]."
+                                                  % (self.ssid, rc, result))
+
+            except Exception as error:
+                self.module.fail_json(msg="Failed to update audit-log configuration! Array Id [%s]. Error [%s]."
+                                      % (self.ssid, to_native(error)))
+        return update
+
+    def update(self):
+        """Update the audit-log configuration."""
+        update = self.update_configuration()
+        self.module.exit_json(msg="Audit-log update complete", changed=update)
+
+    def __call__(self):
+        self.update()
+
+
+def main():
+    auditlog = AuditLog()
+    auditlog()
+
+
+if __name__ == "__main__":
+    main()
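update_configuration() above recovers from an HTTP 422, which the API returns when a full audit log blocks the new configuration, by clearing the log once and retrying. A condensed, self-contained sketch of that single-retry shape; post_config() and the in-memory log are hypothetical stand-ins for the module's REST calls.

def post_config(body, log):
    # Simulate the API: reject with 422 while the log is "full".
    return (422, "audit log full") if log else (200, "ok")

def clear_log_messages(log):
    del log[:]   # destructive, which is why the module gates this on force=True

def recoverable_update(body, log, attempt_recovery=True):
    rc, result = post_config(body, log)
    if rc == 422:
        if attempt_recovery:
            clear_log_messages(log)
            return recoverable_update(body, log, attempt_recovery=False)
        raise RuntimeError("configuration rejected: %s" % result)
    return True

print(recoverable_update({"auditLogMaxRecords": 50000}, log=["old message"]))  # True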
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auth.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auth.py
new file mode 100644
index 000000000..ac5c14c06
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_auth.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_auth
+short_description: NetApp E-Series set or update the password for a storage array.
+description:
+    - Sets or updates the password for a storage array. When the password is updated on the storage array, it must be updated on the SANtricity Web
+      Services proxy. Note that not all storage arrays have a Monitor or RO role.
+version_added: "2.2"
+author: Kevin Hulquest (@hulquest)
+options:
+    validate_certs:
+        required: false
+        default: true
+        description:
+            - Should https certificates be validated?
+        type: bool
+    name:
+        description:
+            - The name of the storage array. Note that if more than one storage array with this name is detected, the task will fail and you'll have to use
+              the ID instead.
+        required: False
+        type: str
+    ssid:
+        description:
+            - The identifier of the storage array in the Web Services Proxy.
+        required: False
+        type: str
+    set_admin:
+        description:
+            - Boolean value on whether to update the admin password. If set to false then the RO account is updated.
+        type: bool
+        default: False
+    current_password:
+        description:
+            - The current admin password. This is not required if the password hasn't been set before.
+        required: False
+        type: str
+    new_password:
+        description:
+            - The password you would like to set. Cannot be more than 30 characters.
+        required: True
+        type: str
+    api_url:
+        description:
+            - The full API url.
+ - "Example: http://ENDPOINT:8080/devmgr/v2" + - This can optionally be set via an environment variable, API_URL + required: False + type: str + api_username: + description: + - The username used to authenticate against the API + - This can optionally be set via an environment variable, API_USERNAME + required: False + type: str + api_password: + description: + - The password used to authenticate against the API + - This can optionally be set via an environment variable, API_PASSWORD + required: False + type: str +''' + +EXAMPLES = ''' +- name: Test module + netapp_e_auth: + name: trex + current_password: OldPasswd + new_password: NewPasswd + set_admin: yes + api_url: '{{ netapp_api_url }}' + api_username: '{{ netapp_api_username }}' + api_password: '{{ netapp_api_password }}' +''' + +RETURN = ''' +msg: + description: Success message + returned: success + type: str + sample: "Password Updated Successfully" +''' +import json +import traceback + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves.urllib.error import HTTPError +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import open_url + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", + "x-netapp-password-validate-method": "none" + +} + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError as e: + r = e.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except Exception: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +def get_ssid(module, name, api_url, user, pwd): + count = 0 + all_systems = 'storage-systems' + systems_url = api_url + all_systems + rc, data = request(systems_url, headers=HEADERS, url_username=user, url_password=pwd, + validate_certs=module.validate_certs) + for system in data: + if system['name'] == name: + count += 1 + if count > 1: + module.fail_json( + msg="You supplied a name for the Storage Array but more than 1 array was found with that name. 
" + + "Use the id instead") + else: + ssid = system['id'] + else: + continue + + if count == 0: + module.fail_json(msg="No storage array with the name %s was found" % name) + + else: + return ssid + + +def get_pwd_status(module, ssid, api_url, user, pwd): + pwd_status = "storage-systems/%s/passwords" % ssid + url = api_url + pwd_status + try: + rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd, + validate_certs=module.validate_certs) + return data['readOnlyPasswordSet'], data['adminPasswordSet'] + except HTTPError as e: + module.fail_json(msg="There was an issue with connecting, please check that your " + "endpoint is properly defined and your credentials are correct: %s" % to_native(e)) + + +def update_storage_system_pwd(module, ssid, pwd, api_url, api_usr, api_pwd): + """Update the stored storage-system password""" + update_pwd = 'storage-systems/%s' % ssid + url = api_url + update_pwd + post_body = json.dumps(dict(storedPassword=pwd)) + try: + rc, data = request(url, data=post_body, method='POST', headers=HEADERS, url_username=api_usr, + url_password=api_pwd, validate_certs=module.validate_certs) + return rc, data + except Exception as e: + module.fail_json(msg="Failed to update system password. Id [%s]. Error [%s]" % (ssid, to_native(e))) + + +def set_password(module, ssid, api_url, user, pwd, current_password=None, new_password=None, set_admin=False): + """Set the storage-system password""" + set_pass = "storage-systems/%s/passwords" % ssid + url = api_url + set_pass + + if not current_password: + current_password = "" + + post_body = json.dumps( + dict(currentAdminPassword=current_password, adminPassword=set_admin, newPassword=new_password)) + + try: + rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd, + ignore_errors=True, validate_certs=module.validate_certs) + except Exception as e: + module.fail_json(msg="Failed to set system password. Id [%s]. Error [%s]" % (ssid, to_native(e)), + exception=traceback.format_exc()) + + if rc == 422: + post_body = json.dumps(dict(currentAdminPassword='', adminPassword=set_admin, newPassword=new_password)) + try: + rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd, + validate_certs=module.validate_certs) + except Exception: + # TODO(lorenp): Resolve ignored rc, data + module.fail_json(msg="Wrong or no admin password supplied. Please update your playbook and try again") + + if int(rc) >= 300: + module.fail_json(msg="Failed to set system password. Id [%s] Code [%s]. 
Error [%s]" % (ssid, rc, data)) + + rc, update_data = update_storage_system_pwd(module, ssid, new_password, api_url, user, pwd) + + if int(rc) < 300: + return update_data + else: + module.fail_json(msg="%s:%s" % (rc, update_data)) + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + name=dict(required=False, type='str'), + ssid=dict(required=False, type='str'), + current_password=dict(required=False, no_log=True), + new_password=dict(required=True, no_log=True), + set_admin=dict(required=True, type='bool'), + api_url=dict(required=True), + api_username=dict(required=False), + api_password=dict(required=False, no_log=True) + ) + ) + module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['name', 'ssid']], + required_one_of=[['name', 'ssid']]) + + name = module.params['name'] + ssid = module.params['ssid'] + current_password = module.params['current_password'] + new_password = module.params['new_password'] + set_admin = module.params['set_admin'] + user = module.params['api_username'] + pwd = module.params['api_password'] + api_url = module.params['api_url'] + module.validate_certs = module.params['validate_certs'] + + if not api_url.endswith('/'): + api_url += '/' + + if name: + ssid = get_ssid(module, name, api_url, user, pwd) + + ro_pwd, admin_pwd = get_pwd_status(module, ssid, api_url, user, pwd) + + if admin_pwd and not current_password: + module.fail_json( + msg="Admin account has a password set. " + + "You must supply current_password in order to update the RO or Admin passwords") + + if len(new_password) > 30: + module.fail_json(msg="Passwords must not be greater than 30 characters in length") + + result = set_password(module, ssid, api_url, user, pwd, current_password=current_password, + new_password=new_password, set_admin=set_admin) + + module.exit_json(changed=True, msg="Password Updated Successfully", + password_set=result['passwordSet'], + password_status=result['passwordStatus']) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_drive_firmware.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_drive_firmware.py new file mode 100644 index 000000000..e74bac776 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_drive_firmware.py @@ -0,0 +1,215 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = """ +--- +module: netapp_e_drive_firmware +version_added: "2.9" +short_description: NetApp E-Series manage drive firmware +description: + - Ensure drive firmware version is activated on specified drive model. +author: + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.netapp.eseries +options: + firmware: + description: + - list of drive firmware file paths. + - NetApp E-Series drives require special firmware which can be downloaded from https://mysupport.netapp.com/NOW/download/tools/diskfw_eseries/ + type: list + required: True + wait_for_completion: + description: + - This flag will cause module to wait for any upgrade actions to complete. 
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_drive_firmware.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_drive_firmware.py
new file mode 100644
index 000000000..e74bac776
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_drive_firmware.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_drive_firmware
+version_added: "2.9"
+short_description: NetApp E-Series manage drive firmware
+description:
+    - Ensure drive firmware version is activated on specified drive model.
+author:
+    - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+    firmware:
+        description:
+            - list of drive firmware file paths.
+            - NetApp E-Series drives require special firmware which can be downloaded from https://mysupport.netapp.com/NOW/download/tools/diskfw_eseries/
+        type: list
+        required: True
+    wait_for_completion:
+        description:
+            - This flag will cause the module to wait for any upgrade actions to complete.
+        type: bool
+        default: false
+    ignore_inaccessible_drives:
+        description:
+            - This flag will determine whether the drive firmware upgrade should fail if any affected drives are inaccessible.
+        type: bool
+        default: false
+    upgrade_drives_online:
+        description:
+            - This flag will determine whether drive firmware can be upgraded while drives are accepting I/O.
+            - When I(upgrade_drives_online=False), stop all I/O before running the task.
+        type: bool
+        default: true
+"""
+EXAMPLES = """
+- name: Ensure correct firmware versions
+  netapp_e_drive_firmware:
+    ssid: "1"
+    api_url: "https://192.168.1.100:8443/devmgr/v2"
+    api_username: "admin"
+    api_password: "adminpass"
+    validate_certs: true
+    firmware: "path/to/drive_firmware"
+    wait_for_completion: true
+    ignore_inaccessible_drives: false
+"""
+RETURN = """
+msg:
+    description: Whether any drive firmware was upgraded and whether it is in progress.
+    type: str
+    returned: always
+    sample:
+        { changed: True, upgrade_in_process: True }
+"""
+import os
+import re
+
+from time import sleep
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule, create_multipart_formdata
+from ansible.module_utils._text import to_native, to_text, to_bytes
+
+
+class NetAppESeriesDriveFirmware(NetAppESeriesModule):
+    WAIT_TIMEOUT_SEC = 60 * 15
+
+    def __init__(self):
+        ansible_options = dict(
+            firmware=dict(type="list", required=True),
+            wait_for_completion=dict(type="bool", default=False),
+            ignore_inaccessible_drives=dict(type="bool", default=False),
+            upgrade_drives_online=dict(type="bool", default=True))
+
+        super(NetAppESeriesDriveFirmware, self).__init__(ansible_options=ansible_options,
+                                                         web_services_version="02.00.0000.0000",
+                                                         supports_check_mode=True)
+
+        args = self.module.params
+        self.firmware_list = args["firmware"]
+        self.wait_for_completion = args["wait_for_completion"]
+        self.ignore_inaccessible_drives = args["ignore_inaccessible_drives"]
+        self.upgrade_drives_online = args["upgrade_drives_online"]
+
+        self.upgrade_list_cache = None
+
+        self.upgrade_required_cache = None
+        self.upgrade_in_progress = False
+        self.drive_info_cache = None
+
+    def upload_firmware(self):
+        """Ensure firmware files have been uploaded prior to the upgrade."""
+        for firmware in self.firmware_list:
+            firmware_name = os.path.basename(firmware)
+            files = [("file", firmware_name, firmware)]
+            headers, data = create_multipart_formdata(files)
+            try:
+                rc, response = self.request("/files/drive", method="POST", headers=headers, data=data)
+            except Exception as error:
+                self.module.fail_json(msg="Failed to upload drive firmware [%s]. Array [%s]. Error [%s]."
+                                          % (firmware_name, self.ssid, to_native(error)))
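upload_firmware() above issues one multipart POST per firmware file, using the (field-name, filename, path) tuple shape that create_multipart_formdata() expects. A sketch of that loop with the request call replaced by a hypothetical stand-in:

import os

def fake_request(endpoint, method, files):   # hypothetical stand-in for self.request()
    print("%s %s <- %s" % (method, endpoint, [name for _, name, _ in files]))
    return 200, {}

firmware_list = ["path/to/drive_firmware"]   # assumed path
for firmware in firmware_list:
    files = [("file", os.path.basename(firmware), firmware)]
    fake_request("/files/drive", method="POST", files=files)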
+
+    def upgrade_list(self):
+        """Determine whether firmware is compatible with the specified drives."""
+        if self.upgrade_list_cache is None:
+            self.upgrade_list_cache = list()
+            try:
+                rc, response = self.request("storage-systems/%s/firmware/drives" % self.ssid)
+
+                # Create upgrade list, this ensures only the firmware uploaded is applied
+                for firmware in self.firmware_list:
+                    filename = os.path.basename(firmware)
+
+                    for uploaded_firmware in response["compatibilities"]:
+                        if uploaded_firmware["filename"] == filename:
+
+                            # Determine whether upgrade is required
+                            drive_reference_list = []
+                            for drive in uploaded_firmware["compatibleDrives"]:
+                                try:
+                                    rc, drive_info = self.request("storage-systems/%s/drives/%s" % (self.ssid, drive["driveRef"]))
+
+                                    # Add drive references that are supported and differ from current firmware
+                                    if (drive_info["firmwareVersion"] != uploaded_firmware["firmwareVersion"] and
+                                            uploaded_firmware["firmwareVersion"] in uploaded_firmware["supportedFirmwareVersions"]):
+
+                                        if self.ignore_inaccessible_drives or (not drive_info["offline"] and drive_info["available"]):
+                                            drive_reference_list.append(drive["driveRef"])
+
+                                        if not drive["onlineUpgradeCapable"] and self.upgrade_drives_online:
+                                            self.module.fail_json(msg="Drive is not capable of online upgrade. Array [%s]. Drive [%s]."
+                                                                      % (self.ssid, drive["driveRef"]))
+
+                                except Exception as error:
+                                    self.module.fail_json(msg="Failed to retrieve drive information. Array [%s]. Drive [%s]. Error [%s]."
+                                                              % (self.ssid, drive["driveRef"], to_native(error)))
+
+                            if drive_reference_list:
+                                self.upgrade_list_cache.extend([{"filename": filename, "driveRefList": drive_reference_list}])
+
+            except Exception as error:
+                self.module.fail_json(msg="Failed to complete compatibility and health check. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+        return self.upgrade_list_cache
+
+    def wait_for_upgrade_completion(self):
+        """Wait for drive firmware upgrade to complete."""
+        drive_references = [reference for drive in self.upgrade_list() for reference in drive["driveRefList"]]
+        last_status = None
+        for attempt in range(int(self.WAIT_TIMEOUT_SEC / 5)):
+            try:
+                rc, response = self.request("storage-systems/%s/firmware/drives/state" % self.ssid)
+
+                # Check drive status
+                for status in response["driveStatus"]:
+                    last_status = status
+                    if status["driveRef"] in drive_references:
+                        if status["status"] == "okay":
+                            continue
+                        elif status["status"] in ["inProgress", "inProgressRecon", "pending", "notAttempted"]:
+                            break
+                        else:
+                            self.module.fail_json(msg="Drive firmware upgrade failed. Array [%s]. Drive [%s]. Status [%s]."
+                                                      % (self.ssid, status["driveRef"], status["status"]))
+                else:
+                    self.upgrade_in_progress = False
+                    break
+            except Exception as error:
+                self.module.fail_json(msg="Failed to retrieve drive status. Array [%s]. Error [%s]." % (self.ssid, to_native(error)))
+
+            sleep(5)
+        else:
+            self.module.fail_json(msg="Timed out waiting for drive firmware upgrade. Array [%s]. Status [%s]." % (self.ssid, last_status))
+
+    def upgrade(self):
+        """Apply firmware to applicable drives."""
+        try:
+            rc, response = self.request("storage-systems/%s/firmware/drives/initiate-upgrade?onlineUpdate=%s"
+                                        % (self.ssid, "true" if self.upgrade_drives_online else "false"), method="POST", data=self.upgrade_list())
+            self.upgrade_in_progress = True
+        except Exception as error:
+            self.module.fail_json(msg="Failed to upgrade drive firmware. Array [%s]. Error [%s]."
+                                      % (self.ssid, to_native(error)))
+
+        if self.wait_for_completion:
+            self.wait_for_upgrade_completion()
+
+    def apply(self):
+        """Ensure the drive firmware policy has been enforced on the E-Series storage system."""
+        self.upload_firmware()
+
+        if self.upgrade_list() and not self.module.check_mode:
+            self.upgrade()
+
+        self.module.exit_json(changed=True if self.upgrade_list() else False,
+                              upgrade_in_process=self.upgrade_in_progress)
+
+
+def main():
+    drive_firmware = NetAppESeriesDriveFirmware()
+    drive_firmware.apply()
+
+
+if __name__ == '__main__':
+    main()
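wait_for_upgrade_completion() above leans on Python's for/else: the else clause only runs when the loop exhausts every attempt without a break, which is exactly the timeout case. The pattern in isolation, with a stand-in status check:

import time

WAIT_TIMEOUT_SEC = 15
INTERVAL_SEC = 5

def check_status(attempt):   # hypothetical stand-in for the drives/state request
    return "okay" if attempt >= 1 else "inProgress"

for attempt in range(WAIT_TIMEOUT_SEC // INTERVAL_SEC):
    if check_status(attempt) == "okay":
        break
    time.sleep(INTERVAL_SEC)
else:
    raise RuntimeError("Timed out waiting for drive firmware upgrade.")
print("upgrade complete")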
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_facts.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_facts.py
new file mode 100644
index 000000000..3734a477e
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_facts.py
@@ -0,0 +1,530 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+module: netapp_e_facts
+short_description: NetApp E-Series retrieve facts about NetApp E-Series storage arrays
+description:
+    - The netapp_e_facts module returns a collection of facts regarding NetApp E-Series storage arrays.
+version_added: '2.2'
+author:
+    - Kevin Hulquest (@hulquest)
+    - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.netapp.eseries
+'''
+
+EXAMPLES = """
+---
+- name: Get array facts
+  netapp_e_facts:
+    ssid: "1"
+    api_url: "https://192.168.1.100:8443/devmgr/v2"
+    api_username: "admin"
+    api_password: "adminpass"
+    validate_certs: true
+"""
+
+RETURN = """
+    msg:
+        description: Success message
+        returned: on success
+        type: str
+        sample:
+            - Gathered facts for storage array. Array ID [1].
+            - Gathered facts for web services proxy.
+    storage_array_facts:
+        description: provides details about the array, controllers, management interfaces, hostside interfaces,
+                     driveside interfaces, disks, storage pools, volumes, snapshots, and features.
+        returned: on successful inquiry from the embedded web services rest api
+        type: complex
+        contains:
+            netapp_controllers:
+                description: storage array controller list that contains basic controller identification and status
+                type: complex
+                sample:
+                    - [{"name": "A", "serial": "021632007299", "status": "optimal"},
+                       {"name": "B", "serial": "021632007300", "status": "failed"}]
+            netapp_disks:
+                description: drive list that contains identification, type, and status information for each drive
+                type: complex
+                sample:
+                    - [{"available": false,
+                        "firmware_version": "MS02",
+                        "id": "01000000500003960C8B67880000000000000000",
+                        "media_type": "ssd",
+                        "product_id": "PX02SMU080      ",
+                        "serial_number": "15R0A08LT2BA",
+                        "status": "optimal",
+                        "tray_ref": "0E00000000000000000000000000000000000000",
+                        "usable_bytes": "799629205504" }]
+            netapp_driveside_interfaces:
+                description: drive side interface list that contains identification, type, and speed for each interface
+                type: complex
+                sample:
+                    - [{ "controller": "A", "interface_speed": "12g", "interface_type": "sas" }]
+                    - [{ "controller": "B", "interface_speed": "10g", "interface_type": "iscsi" }]
+            netapp_enabled_features:
+                description: specifies the enabled features on the storage array.
+ returned: on success + type: complex + sample: + - [ "flashReadCache", "performanceTier", "protectionInformation", "secureVolume" ] + netapp_host_groups: + description: specifies the host groups on the storage arrays. + returned: on success + type: complex + sample: + - [{ "id": "85000000600A098000A4B28D003610705C40B964", "name": "group1" }] + netapp_hosts: + description: specifies the hosts on the storage arrays. + returned: on success + type: complex + sample: + - [{ "id": "8203800000000000000000000000000000000000", + "name": "host1", + "group_id": "85000000600A098000A4B28D003610705C40B964", + "host_type_index": 28, + "ports": [{ "type": "fc", "address": "1000FF7CFFFFFF01", "label": "FC_1" }, + { "type": "fc", "address": "1000FF7CFFFFFF00", "label": "FC_2" }]}] + netapp_host_types: + description: lists the available host types on the storage array. + returned: on success + type: complex + sample: + - [{ "index": 0, "type": "FactoryDefault" }, + { "index": 1, "type": "W2KNETNCL"}, + { "index": 2, "type": "SOL" }, + { "index": 5, "type": "AVT_4M" }, + { "index": 6, "type": "LNX" }, + { "index": 7, "type": "LnxALUA" }, + { "index": 8, "type": "W2KNETCL" }, + { "index": 9, "type": "AIX MPIO" }, + { "index": 10, "type": "VmwTPGSALUA" }, + { "index": 15, "type": "HPXTPGS" }, + { "index": 17, "type": "SolTPGSALUA" }, + { "index": 18, "type": "SVC" }, + { "index": 22, "type": "MacTPGSALUA" }, + { "index": 23, "type": "WinTPGSALUA" }, + { "index": 24, "type": "LnxTPGSALUA" }, + { "index": 25, "type": "LnxTPGSALUA_PM" }, + { "index": 26, "type": "ONTAP_ALUA" }, + { "index": 27, "type": "LnxTPGSALUA_SF" }, + { "index": 28, "type": "LnxDHALUA" }, + { "index": 29, "type": "ATTOClusterAllOS" }] + netapp_hostside_interfaces: + description: host side interface list that contains identification, configuration, type, speed, and + status information for each interface + type: complex + sample: + - [{"iscsi": + [{ "controller": "A", + "current_interface_speed": "10g", + "ipv4_address": "10.10.10.1", + "ipv4_enabled": true, + "ipv4_gateway": "10.10.10.1", + "ipv4_subnet_mask": "255.255.255.0", + "ipv6_enabled": false, + "iqn": "iqn.1996-03.com.netapp:2806.600a098000a81b6d0000000059d60c76", + "link_status": "up", + "mtu": 9000, + "supported_interface_speeds": [ "10g" ] }]}] + netapp_management_interfaces: + description: management interface list that contains identification, configuration, and status for + each interface + type: complex + sample: + - [{"alias": "ict-2800-A", + "channel": 1, + "controller": "A", + "dns_config_method": "dhcp", + "dns_servers": [], + "ipv4_address": "10.1.1.1", + "ipv4_address_config_method": "static", + "ipv4_enabled": true, + "ipv4_gateway": "10.113.1.1", + "ipv4_subnet_mask": "255.255.255.0", + "ipv6_enabled": false, + "link_status": "up", + "mac_address": "00A098A81B5D", + "name": "wan0", + "ntp_config_method": "disabled", + "ntp_servers": [], + "remote_ssh_access": false }] + netapp_storage_array: + description: provides storage array identification, firmware version, and available capabilities + type: dict + sample: + - {"chassis_serial": "021540006043", + "firmware": "08.40.00.01", + "name": "ict-2800-11_40", + "wwn": "600A098000A81B5D0000000059D60C76", + "cacheBlockSizes": [4096, + 8192, + 16384, + 32768], + "supportedSegSizes": [8192, + 16384, + 32768, + 65536, + 131072, + 262144, + 524288]} + netapp_storage_pools: + description: storage pool list that contains identification and capacity information for each pool + type: complex + sample: + - [{"available_capacity": 
"3490353782784", + "id": "04000000600A098000A81B5D000002B45A953A61", + "name": "Raid6", + "total_capacity": "5399466745856", + "used_capacity": "1909112963072" }] + netapp_volumes: + description: storage volume list that contains identification and capacity information for each volume + type: complex + sample: + - [{"capacity": "5368709120", + "id": "02000000600A098000AAC0C3000002C45A952BAA", + "is_thin_provisioned": false, + "name": "5G", + "parent_storage_pool_id": "04000000600A098000A81B5D000002B45A953A61" }] + netapp_workload_tags: + description: workload tag list + type: complex + sample: + - [{"id": "87e19568-43fb-4d8d-99ea-2811daaa2b38", + "name": "ftp_server", + "workloadAttributes": [{"key": "use", + "value": "general"}]}] + netapp_volumes_by_initiators: + description: list of available volumes keyed by the mapped initiators. + type: complex + sample: + - {"192_168_1_1": [{"id": "02000000600A098000A4B9D1000015FD5C8F7F9E", + "meta_data": {"filetype": "xfs", "public": true}, + "name": "some_volume", + "workload_name": "test2_volumes", + "wwn": "600A098000A4B9D1000015FD5C8F7F9E"}]} + snapshot_images: + description: snapshot image list that contains identification, capacity, and status information for each + snapshot image + type: complex + sample: + - [{"active_cow": true, + "creation_method": "user", + "id": "34000000600A098000A81B5D00630A965B0535AC", + "pit_capacity": "5368709120", + "reposity_cap_utilization": "0", + "rollback_source": false, + "status": "optimal" }] +""" + +from re import match +from pprint import pformat +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule + + +class Facts(NetAppESeriesModule): + def __init__(self): + web_services_version = "02.00.0000.0000" + super(Facts, self).__init__(ansible_options={}, + web_services_version=web_services_version, + supports_check_mode=True) + + def get_controllers(self): + """Retrieve a mapping of controller references to their labels.""" + controllers = list() + try: + rc, controllers = self.request('storage-systems/%s/graph/xpath-filter?query=/controller/id' % self.ssid) + except Exception as err: + self.module.fail_json( + msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]." + % (self.ssid, str(err))) + + controllers.sort() + + controllers_dict = {} + i = ord('A') + for controller in controllers: + label = chr(i) + controllers_dict[controller] = label + i += 1 + + return controllers_dict + + def get_array_facts(self): + """Extract particular facts from the storage array graph""" + facts = dict(facts_from_proxy=(not self.is_embedded()), ssid=self.ssid) + controller_reference_label = self.get_controllers() + array_facts = None + + # Get the storage array graph + try: + rc, array_facts = self.request("storage-systems/%s/graph" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to obtain facts from storage array with id [%s]. 
Error [%s]" % (self.ssid, str(error))) + + facts['netapp_storage_array'] = dict( + name=array_facts['sa']['saData']['storageArrayLabel'], + chassis_serial=array_facts['sa']['saData']['chassisSerialNumber'], + firmware=array_facts['sa']['saData']['fwVersion'], + wwn=array_facts['sa']['saData']['saId']['worldWideName'], + segment_sizes=array_facts['sa']['featureParameters']['supportedSegSizes'], + cache_block_sizes=array_facts['sa']['featureParameters']['cacheBlockSizes']) + + facts['netapp_controllers'] = [ + dict( + name=controller_reference_label[controller['controllerRef']], + serial=controller['serialNumber'].strip(), + status=controller['status'], + ) for controller in array_facts['controller']] + + facts['netapp_host_groups'] = [ + dict( + id=group['id'], + name=group['name'] + ) for group in array_facts['storagePoolBundle']['cluster']] + + facts['netapp_hosts'] = [ + dict( + group_id=host['clusterRef'], + hosts_reference=host['hostRef'], + id=host['id'], + name=host['name'], + host_type_index=host['hostTypeIndex'], + posts=host['hostSidePorts'] + ) for host in array_facts['storagePoolBundle']['host']] + + facts['netapp_host_types'] = [ + dict( + type=host_type['hostType'], + index=host_type['index'] + ) for host_type in array_facts['sa']['hostSpecificVals'] + if 'hostType' in host_type.keys() and host_type['hostType'] + # This conditional ignores zero-length strings which indicates that the associated host-specific NVSRAM region has been cleared. + ] + facts['snapshot_images'] = [ + dict( + id=snapshot['id'], + status=snapshot['status'], + pit_capacity=snapshot['pitCapacity'], + creation_method=snapshot['creationMethod'], + reposity_cap_utilization=snapshot['repositoryCapacityUtilization'], + active_cow=snapshot['activeCOW'], + rollback_source=snapshot['isRollbackSource'] + ) for snapshot in array_facts['highLevelVolBundle']['pit']] + + facts['netapp_disks'] = [ + dict( + id=disk['id'], + available=disk['available'], + media_type=disk['driveMediaType'], + status=disk['status'], + usable_bytes=disk['usableCapacity'], + tray_ref=disk['physicalLocation']['trayRef'], + product_id=disk['productID'], + firmware_version=disk['firmwareVersion'], + serial_number=disk['serialNumber'].lstrip() + ) for disk in array_facts['drive']] + + facts['netapp_management_interfaces'] = [ + dict(controller=controller_reference_label[controller['controllerRef']], + name=iface['ethernet']['interfaceName'], + alias=iface['ethernet']['alias'], + channel=iface['ethernet']['channel'], + mac_address=iface['ethernet']['macAddr'], + remote_ssh_access=iface['ethernet']['rloginEnabled'], + link_status=iface['ethernet']['linkStatus'], + ipv4_enabled=iface['ethernet']['ipv4Enabled'], + ipv4_address_config_method=iface['ethernet']['ipv4AddressConfigMethod'].lower().replace("config", ""), + ipv4_address=iface['ethernet']['ipv4Address'], + ipv4_subnet_mask=iface['ethernet']['ipv4SubnetMask'], + ipv4_gateway=iface['ethernet']['ipv4GatewayAddress'], + ipv6_enabled=iface['ethernet']['ipv6Enabled'], + dns_config_method=iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsAcquisitionType'], + dns_servers=(iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers'] + if iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers'] else []), + ntp_config_method=iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpAcquisitionType'], + ntp_servers=(iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers'] + if 
iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers'] else []) + ) for controller in array_facts['controller'] for iface in controller['netInterfaces']] + + facts['netapp_hostside_interfaces'] = [ + dict( + fc=[dict(controller=controller_reference_label[controller['controllerRef']], + channel=iface['fibre']['channel'], + link_status=iface['fibre']['linkStatus'], + current_interface_speed=strip_interface_speed(iface['fibre']['currentInterfaceSpeed']), + maximum_interface_speed=strip_interface_speed(iface['fibre']['maximumInterfaceSpeed'])) + for controller in array_facts['controller'] + for iface in controller['hostInterfaces'] + if iface['interfaceType'] == 'fc'], + ib=[dict(controller=controller_reference_label[controller['controllerRef']], + channel=iface['ib']['channel'], + link_status=iface['ib']['linkState'], + mtu=iface['ib']['maximumTransmissionUnit'], + current_interface_speed=strip_interface_speed(iface['ib']['currentSpeed']), + maximum_interface_speed=strip_interface_speed(iface['ib']['supportedSpeed'])) + for controller in array_facts['controller'] + for iface in controller['hostInterfaces'] + if iface['interfaceType'] == 'ib'], + iscsi=[dict(controller=controller_reference_label[controller['controllerRef']], + iqn=iface['iscsi']['iqn'], + link_status=iface['iscsi']['interfaceData']['ethernetData']['linkStatus'], + ipv4_enabled=iface['iscsi']['ipv4Enabled'], + ipv4_address=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4Address'], + ipv4_subnet_mask=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4SubnetMask'], + ipv4_gateway=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4GatewayAddress'], + ipv6_enabled=iface['iscsi']['ipv6Enabled'], + mtu=iface['iscsi']['interfaceData']['ethernetData']['maximumFramePayloadSize'], + current_interface_speed=strip_interface_speed(iface['iscsi']['interfaceData'] + ['ethernetData']['currentInterfaceSpeed']), + supported_interface_speeds=strip_interface_speed(iface['iscsi']['interfaceData'] + ['ethernetData'] + ['supportedInterfaceSpeeds'])) + for controller in array_facts['controller'] + for iface in controller['hostInterfaces'] + if iface['interfaceType'] == 'iscsi'], + sas=[dict(controller=controller_reference_label[controller['controllerRef']], + channel=iface['sas']['channel'], + current_interface_speed=strip_interface_speed(iface['sas']['currentInterfaceSpeed']), + maximum_interface_speed=strip_interface_speed(iface['sas']['maximumInterfaceSpeed']), + link_status=iface['sas']['iocPort']['state']) + for controller in array_facts['controller'] + for iface in controller['hostInterfaces'] + if iface['interfaceType'] == 'sas'])] + + facts['netapp_driveside_interfaces'] = [ + dict( + controller=controller_reference_label[controller['controllerRef']], + interface_type=interface['interfaceType'], + interface_speed=strip_interface_speed( + interface[interface['interfaceType']]['maximumInterfaceSpeed'] + if (interface['interfaceType'] == 'sata' or + interface['interfaceType'] == 'sas' or + interface['interfaceType'] == 'fibre') + else ( + interface[interface['interfaceType']]['currentSpeed'] + if interface['interfaceType'] == 'ib' + else ( + interface[interface['interfaceType']]['interfaceData']['maximumInterfaceSpeed'] + if interface['interfaceType'] == 'iscsi' else 'unknown' + ))), + ) + for controller in array_facts['controller'] + for interface in controller['driveInterfaces']] + + facts['netapp_storage_pools'] = [ + dict( + id=storage_pool['id'], + name=storage_pool['name'], + 
available_capacity=storage_pool['freeSpace'],
+                total_capacity=storage_pool['totalRaidedSpace'],
+                used_capacity=storage_pool['usedSpace']
+            ) for storage_pool in array_facts['volumeGroup']]
+
+        all_volumes = list(array_facts['volume'])
+
+        facts['netapp_volumes'] = [
+            dict(
+                id=v['id'],
+                name=v['name'],
+                parent_storage_pool_id=v['volumeGroupRef'],
+                capacity=v['capacity'],
+                is_thin_provisioned=v['thinProvisioned'],
+                workload=v['metadata'],
+            ) for v in all_volumes]
+
+        workload_tags = None
+        try:
+            rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve workload tags. Array [%s]. Error [%s]." % (self.ssid, str(error)))
+
+        facts['netapp_workload_tags'] = [
+            dict(
+                id=workload_tag['id'],
+                name=workload_tag['name'],
+                attributes=workload_tag['workloadAttributes']
+            ) for workload_tag in workload_tags]
+
+        # Create a dictionary of volume lists keyed by host names
+        facts['netapp_volumes_by_initiators'] = dict()
+        for mapping in array_facts['storagePoolBundle']['lunMapping']:
+            for host in facts['netapp_hosts']:
+                if mapping['mapRef'] == host['hosts_reference'] or mapping['mapRef'] == host['group_id']:
+                    if host['name'] not in facts['netapp_volumes_by_initiators'].keys():
+                        facts['netapp_volumes_by_initiators'].update({host['name']: []})
+
+                    for volume in all_volumes:
+                        if mapping['id'] in [volume_mapping['id'] for volume_mapping in volume['listOfMappings']]:
+
+                            # Determine workload name if there is one
+                            workload_name = ""
+                            metadata = dict()
+                            for volume_tag in volume['metadata']:
+                                if volume_tag['key'] == 'workloadId':
+                                    for workload_tag in facts['netapp_workload_tags']:
+                                        if volume_tag['value'] == workload_tag['id']:
+                                            workload_name = workload_tag['name']
+                                            metadata = dict((entry['key'], entry['value'])
+                                                            for entry in workload_tag['attributes']
+                                                            if entry['key'] != 'profileId')
+
+                            facts['netapp_volumes_by_initiators'][host['name']].append(
+                                dict(name=volume['name'],
+                                     id=volume['id'],
+                                     wwn=volume['wwn'],
+                                     workload_name=workload_name,
+                                     meta_data=metadata))
+
+        features = [feature for feature in array_facts['sa']['capabilities']]
+        features.extend([feature['capability'] for feature in array_facts['sa']['premiumFeatures']
+                         if feature['isEnabled']])
+        features = list(set(features))  # ensure unique
+        features.sort()
+        facts['netapp_enabled_features'] = features
+
+        return facts
+
+    def get_facts(self):
+        """Get the embedded or web services proxy information."""
+        facts = self.get_array_facts()
+
+        self.module.log("isEmbedded: %s" % self.is_embedded())
+        self.module.log(pformat(facts))
+
+        self.module.exit_json(msg="Gathered facts for storage array. Array ID: [%s]." % self.ssid,
+                              storage_array_facts=facts)
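get_array_facts() builds netapp_volumes_by_initiators by cross-matching three lists: LUN mappings against hosts (by host reference or group id), then mappings against each volume's listOfMappings. The accumulation pattern, reduced to a sketch over assumed sample data:

mappings = [{"mapRef": "H1", "id": "LM1"}]
hosts = [{"name": "host1", "hosts_reference": "H1", "group_id": "G1"}]
volumes = [{"name": "vol1", "listOfMappings": [{"id": "LM1"}]}]

volumes_by_host = {}
for mapping in mappings:
    for host in hosts:
        if mapping["mapRef"] in (host["hosts_reference"], host["group_id"]):
            volumes_by_host.setdefault(host["name"], [])
            for volume in volumes:
                if mapping["id"] in [m["id"] for m in volume["listOfMappings"]]:
                    volumes_by_host[host["name"]].append(volume["name"])
print(volumes_by_host)  # {'host1': ['vol1']}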
+
+
+def strip_interface_speed(speed):
+    """Converts symbol interface speeds to a more common notation. Example: 'speed10gig' -> '10g'"""
+    if isinstance(speed, list):
+        result = [match(r"speed[0-9]{1,3}[gm]", sp) for sp in speed]
+        result = [sp.group().replace("speed", "") if sp else "unknown" for sp in result]
+        result = ["auto" if match(r"auto", sp) else sp for sp in result]
+    else:
+        result = match(r"speed[0-9]{1,3}[gm]", speed)
+        result = result.group().replace("speed", "") if result else "unknown"
+        result = "auto" if match(r"auto", result.lower()) else result
+    return result
+
+
+def main():
+    facts = Facts()
+    facts.get_facts()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_firmware.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_firmware.py
new file mode 100644
index 000000000..c2f7f7457
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_firmware.py
@@ -0,0 +1,488 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_firmware
+version_added: "2.9"
+short_description: NetApp E-Series manage firmware.
+description:
+    - Ensure specific firmware versions are activated on E-Series storage system.
+author:
+    - Nathan Swartz (@ndswartz)
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+    nvsram:
+        description:
+            - Path to the NVSRAM file.
+        type: str
+        required: true
+    firmware:
+        description:
+            - Path to the firmware file.
+        type: str
+        required: true
+    wait_for_completion:
+        description:
+            - This flag will cause the module to wait for any upgrade actions to complete.
+        type: bool
+        default: false
+    ignore_health_check:
+        description:
+            - This flag will force firmware to be activated in spite of the health check.
+            - Use at your own risk. Certain non-optimal states could result in data loss.
+        type: bool
+        default: false
+"""
+EXAMPLES = """
+- name: Ensure correct firmware versions
+  netapp_e_firmware:
+    ssid: "1"
+    api_url: "https://192.168.1.100:8443/devmgr/v2"
+    api_username: "admin"
+    api_password: "adminpass"
+    validate_certs: true
+    nvsram: "path/to/nvsram"
+    firmware: "path/to/firmware"
+    wait_for_completion: true
+- name: Ensure correct firmware versions
+  netapp_e_firmware:
+    ssid: "1"
+    api_url: "https://192.168.1.100:8443/devmgr/v2"
+    api_username: "admin"
+    api_password: "adminpass"
+    validate_certs: true
+    nvsram: "path/to/nvsram"
+    firmware: "path/to/firmware"
+"""
+RETURN = """
+msg:
+    description: Status and version of firmware and NVSRAM.
+    type: str
+    returned: always
+    sample:
+"""
+import os
+
+from time import sleep
+from ansible.module_utils import six
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule, create_multipart_formdata, request
+from ansible.module_utils._text import to_native, to_text, to_bytes
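The class below decides between a bundled and a plain firmware image in is_firmware_bundled() by sniffing the first 16 bytes of the file for a marker string. That check in isolation; the byte strings fed to it here are fabricated for illustration:

def looks_bundled(first_bytes):
    signature = first_bytes.lower()
    if b"firmware" in signature:
        return False
    if b"combined_content" in signature:
        return True
    raise ValueError("not a recognized firmware file")

print(looks_bundled(b"combined_content"))   # True: bundled firmware
print(looks_bundled(b"firmware 08.42.3"))   # False: plain firmware image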
+
+
+class NetAppESeriesFirmware(NetAppESeriesModule):
+    HEALTH_CHECK_TIMEOUT_MS = 120000
+    REBOOT_TIMEOUT_SEC = 15 * 60
+    FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC = 60
+    DEFAULT_TIMEOUT = 60 * 15  # This will override the NetAppESeriesModule request method timeout.
+
+    def __init__(self):
+        ansible_options = dict(
+            nvsram=dict(type="str", required=True),
+            firmware=dict(type="str", required=True),
+            wait_for_completion=dict(type="bool", default=False),
+            ignore_health_check=dict(type="bool", default=False))
+
+        super(NetAppESeriesFirmware, self).__init__(ansible_options=ansible_options,
+                                                    web_services_version="02.00.0000.0000",
+                                                    supports_check_mode=True)
+
+        args = self.module.params
+        self.nvsram = args["nvsram"]
+        self.firmware = args["firmware"]
+        self.wait_for_completion = args["wait_for_completion"]
+        self.ignore_health_check = args["ignore_health_check"]
+
+        self.nvsram_name = None
+        self.firmware_name = None
+        self.is_bundle_cache = None
+        self.firmware_version_cache = None
+        self.nvsram_version_cache = None
+        self.upgrade_required = False
+        self.upgrade_in_progress = False
+        self.module_info = dict()
+
+        self.nvsram_name = os.path.basename(self.nvsram)
+        self.firmware_name = os.path.basename(self.firmware)
+
+    def is_firmware_bundled(self):
+        """Determine whether supplied firmware is bundle."""
+        if self.is_bundle_cache is None:
+            with open(self.firmware, "rb") as fh:
+                signature = fh.read(16).lower()
+
+            if b"firmware" in signature:
+                self.is_bundle_cache = False
+            elif b"combined_content" in signature:
+                self.is_bundle_cache = True
+            else:
+                self.module.fail_json(msg="Firmware file is invalid. File [%s]. Array [%s]" % (self.firmware, self.ssid))
+
+        return self.is_bundle_cache
+
+    def firmware_version(self):
+        """Retrieve firmware version of the firmware file. Return: bytes string"""
+        if self.firmware_version_cache is None:
+
+            # Search firmware file for bundle or firmware version
+            with open(self.firmware, "rb") as fh:
+                line = fh.readline()
+                while line:
+                    if self.is_firmware_bundled():
+                        if b'displayableAttributeList=' in line:
+                            for item in line[25:].split(b','):
+                                key, value = item.split(b"|")
+                                if key == b'VERSION':
+                                    self.firmware_version_cache = value.strip(b"\n")
+                            break
+                    elif b"Version:" in line:
+                        self.firmware_version_cache = line.split()[-1].strip(b"\n")
+                        break
+                    line = fh.readline()
+                else:
+                    self.module.fail_json(msg="Failed to determine firmware version. File [%s]. Array [%s]." % (self.firmware, self.ssid))
+        return self.firmware_version_cache
+
+    def nvsram_version(self):
+        """Retrieve NVSRAM version of the NVSRAM file. Return: byte string"""
+        if self.nvsram_version_cache is None:
+
+            with open(self.nvsram, "rb") as fh:
+                line = fh.readline()
+                while line:
+                    if b".NVSRAM Configuration Number" in line:
+                        self.nvsram_version_cache = line.split(b'"')[-2]
+                        break
+                    line = fh.readline()
+                else:
+                    self.module.fail_json(msg="Failed to determine NVSRAM file version. File [%s]. Array [%s]." % (self.nvsram, self.ssid))
+        return self.nvsram_version_cache
+
+    def check_system_health(self):
+        """Ensure E-Series storage system is healthy. Works for both embedded and proxy web services."""
+        try:
+            rc, request_id = self.request("health-check", method="POST", data={"onlineOnly": True, "storageDeviceIds": [self.ssid]})
+
+            while True:
+                sleep(1)
+
+                try:
+                    rc, response = self.request("health-check?requestId=%s" % request_id["requestId"])
+
+                    if not response["healthCheckRunning"]:
+                        return response["results"][0]["successful"]
+                    elif int(response["results"][0]["processingTimeMS"]) > self.HEALTH_CHECK_TIMEOUT_MS:
+                        self.module.fail_json(msg="Health check failed to complete. Array Id [%s]." % self.ssid)
+
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to retrieve health check status. Array Id [%s]. Error[%s]."
+                                              % (self.ssid, to_native(error)))
+        except Exception as error:
+            self.module.fail_json(msg="Failed to initiate health check. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+        self.module.fail_json(msg="Failed to retrieve health check status. Array Id [%s]." % self.ssid)
+
+    def embedded_check_compatibility(self):
+        """Verify files are compatible with E-Series storage system."""
+        self.embedded_check_nvsram_compatibility()
+        self.embedded_check_bundle_compatibility()
+
+    def embedded_check_nvsram_compatibility(self):
+        """Verify the provided NVSRAM is compatible with E-Series storage system."""
+
+        # Check nvsram compatibility
+        try:
+            files = [("nvsramimage", self.nvsram_name, self.nvsram)]
+            headers, data = create_multipart_formdata(files=files)
+
+            rc, nvsram_compatible = self.request("firmware/embedded-firmware/%s/nvsram-compatibility-check" % self.ssid,
+                                                 method="POST", data=data, headers=headers)
+
+            if not nvsram_compatible["signatureTestingPassed"]:
+                self.module.fail_json(msg="Invalid NVSRAM file. File [%s]." % self.nvsram)
+            if not nvsram_compatible["fileCompatible"]:
+                self.module.fail_json(msg="Incompatible NVSRAM file. File [%s]." % self.nvsram)
+
+            # Determine whether nvsram is required
+            for module in nvsram_compatible["versionContents"]:
+                if module["bundledVersion"] != module["onboardVersion"]:
+                    self.upgrade_required = True
+
+                # Update bundle info
+                self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}})
+
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve NVSRAM compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
+
+    def embedded_check_bundle_compatibility(self):
+        """Verify the provided firmware bundle is compatible with E-Series storage system."""
+        try:
+            files = [("files[]", "blob", self.firmware)]
+            headers, data = create_multipart_formdata(files=files, send_8kb=True)
+            rc, bundle_compatible = self.request("firmware/embedded-firmware/%s/bundle-compatibility-check" % self.ssid,
+                                                 method="POST", data=data, headers=headers)
+
+            # Determine whether valid and compatible firmware
+            if not bundle_compatible["signatureTestingPassed"]:
+                self.module.fail_json(msg="Invalid firmware bundle file. File [%s]." % self.firmware)
+            if not bundle_compatible["fileCompatible"]:
+                self.module.fail_json(msg="Incompatible firmware bundle file. File [%s]." % self.firmware)
+
+            # Determine whether upgrade is required
+            for module in bundle_compatible["versionContents"]:
+
+                bundle_module_version = module["bundledVersion"].split(".")
+                onboard_module_version = module["onboardVersion"].split(".")
+                version_minimum_length = min(len(bundle_module_version), len(onboard_module_version))
+                if bundle_module_version[:version_minimum_length] != onboard_module_version[:version_minimum_length]:
+                    self.upgrade_required = True
+
+                # Check whether downgrade is being attempted
+                bundle_version = module["bundledVersion"].split(".")[:2]
+                onboard_version = module["onboardVersion"].split(".")[:2]
+                if bundle_version[0] < onboard_version[0] or (bundle_version[0] == onboard_version[0] and bundle_version[1] < onboard_version[1]):
+                    self.module.fail_json(msg="Downgrades are not permitted. onboard [%s] > bundled[%s]."
+                                              % (module["onboardVersion"], module["bundledVersion"]))
+
+                # Update bundle info
+                self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}})
+
+        except Exception as error:
+            self.module.fail_json(msg="Failed to retrieve bundle compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error)))
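embedded_check_bundle_compatibility() above compares versions only over the components both strings share, so a four-part bundled version can still match a shorter onboard version. The comparison on its own:

def differs(bundled, onboard):
    bundled_parts = bundled.split(".")
    onboard_parts = onboard.split(".")
    common = min(len(bundled_parts), len(onboard_parts))
    return bundled_parts[:common] != onboard_parts[:common]

print(differs("08.42.30.05", "08.42"))        # False: no upgrade required
print(differs("08.42.30.05", "08.40.30.05"))  # True: upgrade required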
+ % (module["onboardVersion"], module["bundledVersion"])) + + # Update bundle info + self.module_info.update({module["module"]: {"onboard_version": module["onboardVersion"], "bundled_version": module["bundledVersion"]}}) + + except Exception as error: + self.module.fail_json(msg="Failed to retrieve bundle compatibility results. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error))) + + def embedded_wait_for_upgrade(self): + """Wait for SANtricity Web Services Embedded to be available after reboot.""" + for count in range(0, self.REBOOT_TIMEOUT_SEC): + try: + rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData" % self.ssid) + bundle_display = [m["versionString"] for m in response[0]["extendedSAData"]["codeVersions"] if m["codeModule"] == "bundleDisplay"][0] + if rc == 200 and six.b(bundle_display) == self.firmware_version() and six.b(response[0]["nvsramVersion"]) == self.nvsram_version(): + self.upgrade_in_progress = False + break + except Exception as error: + pass + sleep(1) + else: + self.module.fail_json(msg="Timeout waiting for Santricity Web Services Embedded. Array [%s]" % self.ssid) + + def embedded_upgrade(self): + """Upload and activate both firmware and NVSRAM.""" + files = [("nvsramfile", self.nvsram_name, self.nvsram), + ("dlpfile", self.firmware_name, self.firmware)] + headers, data = create_multipart_formdata(files=files) + try: + rc, response = self.request("firmware/embedded-firmware?staged=false&nvsram=true", method="POST", data=data, headers=headers) + self.upgrade_in_progress = True + except Exception as error: + self.module.fail_json(msg="Failed to upload and activate firmware. Array Id [%s]. Error[%s]." % (self.ssid, to_native(error))) + if self.wait_for_completion: + self.embedded_wait_for_upgrade() + + def proxy_check_nvsram_compatibility(self): + """Verify nvsram is compatible with E-Series storage system.""" + data = {"storageDeviceIds": [self.ssid]} + try: + rc, check = self.request("firmware/compatibility-check", method="POST", data=data) + for count in range(0, int((self.FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC / 5))): + sleep(5) + try: + rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"]) + if not response["checkRunning"]: + for result in response["results"][0]["nvsramFiles"]: + if result["filename"] == self.nvsram_name: + return + self.module.fail_json(msg="NVSRAM is not compatible. NVSRAM [%s]. Array [%s]." % (self.nvsram_name, self.ssid)) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve NVSRAM status update from proxy. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + except Exception as error: + self.module.fail_json(msg="Failed to receive NVSRAM compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + def proxy_check_firmware_compatibility(self): + """Verify firmware is compatible with E-Series storage system.""" + data = {"storageDeviceIds": [self.ssid]} + try: + rc, check = self.request("firmware/compatibility-check", method="POST", data=data) + for count in range(0, int((self.FIRMWARE_COMPATIBILITY_CHECK_TIMEOUT_SEC / 5))): + sleep(5) + try: + rc, response = self.request("firmware/compatibility-check?requestId=%s" % check["requestId"]) + if not response["checkRunning"]: + for result in response["results"][0]["cfwFiles"]: + if result["filename"] == self.firmware_name: + return + self.module.fail_json(msg="Firmware bundle is not compatible. firmware [%s]. Array [%s]." 
% (self.firmware_name, self.ssid)) + + except Exception as error: + self.module.fail_json(msg="Failed to retrieve firmware status update from proxy. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + except Exception as error: + self.module.fail_json(msg="Failed to receive firmware compatibility information. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + def proxy_upload_and_check_compatibility(self): + """Ensure firmware is uploaded and verify compatibility.""" + try: + rc, cfw_files = self.request("firmware/cfw-files") + for file in cfw_files: + if file["filename"] == self.nvsram_name: + break + else: + fields = [("validate", "true")] + files = [("firmwareFile", self.nvsram_name, self.nvsram)] + headers, data = create_multipart_formdata(files=files, fields=fields) + try: + rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers) + except Exception as error: + self.module.fail_json(msg="Failed to upload NVSRAM file. File [%s]. Array [%s]. Error [%s]." + % (self.nvsram_name, self.ssid, to_native(error))) + + self.proxy_check_nvsram_compatibility() + + for file in cfw_files: + if file["filename"] == self.firmware_name: + break + else: + fields = [("validate", "true")] + files = [("firmwareFile", self.firmware_name, self.firmware)] + headers, data = create_multipart_formdata(files=files, fields=fields) + try: + rc, response = self.request("firmware/upload", method="POST", data=data, headers=headers) + except Exception as error: + self.module.fail_json(msg="Failed to upload firmware bundle file. File [%s]. Array [%s]. Error [%s]." + % (self.firmware_name, self.ssid, to_native(error))) + + self.proxy_check_firmware_compatibility() + except Exception as error: + self.module.fail_json(msg="Failed to retrieve existing firmware files. Error [%s]" % to_native(error)) + + def proxy_check_upgrade_required(self): + """Staging is required to collect firmware information from the web services proxy.""" + # Verify controller consistency and get firmware versions + try: + # Retrieve current bundle version + if self.is_firmware_bundled(): + rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/controller/codeVersions[codeModule='bundleDisplay']" % self.ssid) + current_firmware_version = six.b(response[0]["versionString"]) + else: + rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion" % self.ssid) + current_firmware_version = six.b(response[0]) + + # Determine whether upgrade is required + if current_firmware_version != self.firmware_version(): + + current = current_firmware_version.split(b".")[:2] + upgrade = self.firmware_version().split(b".")[:2] + if current[0] < upgrade[0] or (current[0] == upgrade[0] and current[1] <= upgrade[1]): + self.upgrade_required = True + else: + self.module.fail_json(msg="Downgrades are not permitted. Firmware [%s]. Array [%s]." % (self.firmware, self.ssid)) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve controller firmware information. Array [%s]. Error [%s]" % (self.ssid, to_native(error))) + # Determine current NVSRAM version and whether change is required + try: + rc, response = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid) + if six.b(response[0]) != self.nvsram_version(): + self.upgrade_required = True + + except Exception as error: + self.module.fail_json(msg="Failed to retrieve storage system's NVSRAM version. Array [%s].
Error [%s]" % (self.ssid, to_native(error))) + + def proxy_wait_for_upgrade(self, request_id): + """Wait for SANtricity Web Services Proxy to report upgrade complete""" + if self.is_firmware_bundled(): + while True: + try: + sleep(5) + rc, response = self.request("batch/cfw-upgrade/%s" % request_id) + + if response["status"] == "complete": + self.upgrade_in_progress = False + break + elif response["status"] in ["failed", "cancelled"]: + self.module.fail_json(msg="Firmware upgrade failed to complete. Array [%s]." % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve firmware upgrade status. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + else: + for count in range(0, int(self.REBOOT_TIMEOUT_SEC / 5)): + try: + sleep(5) + rc_firmware, firmware = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/fwVersion" % self.ssid) + rc_nvsram, nvsram = self.request("storage-systems/%s/graph/xpath-filter?query=/sa/saData/nvsramVersion" % self.ssid) + + if six.b(firmware[0]) == self.firmware_version() and six.b(nvsram[0]) == self.nvsram_version(): + self.upgrade_in_progress = False + break + except Exception as error: + pass + else: + self.module.fail_json(msg="Timed out waiting for firmware upgrade to complete. Array [%s]." % self.ssid) + + def proxy_upgrade(self): + """Activate previously uploaded firmware related files.""" + request_id = None + if self.is_firmware_bundled(): + data = {"activate": True, + "firmwareFile": self.firmware_name, + "nvsramFile": self.nvsram_name, + "systemInfos": [{"systemId": self.ssid, + "allowNonOptimalActivation": self.ignore_health_check}]} + try: + rc, response = self.request("batch/cfw-upgrade", method="POST", data=data) + request_id = response["requestId"] + except Exception as error: + self.module.fail_json(msg="Failed to initiate firmware upgrade. Array [%s]. Error [%s]." % (self.ssid, to_native(error))) + + else: + data = {"stageFirmware": False, + "skipMelCheck": self.ignore_health_check, + "cfwFile": self.firmware_name, + "nvsramFile": self.nvsram_name} + try: + rc, response = self.request("storage-systems/%s/cfw-upgrade" % self.ssid, method="POST", data=data) + request_id = response["requestId"] + except Exception as error: + self.module.fail_json(msg="Failed to initiate firmware upgrade. Array [%s]. Error [%s]." 
% (self.ssid, to_native(error))) + + self.upgrade_in_progress = True + if self.wait_for_completion: + self.proxy_wait_for_upgrade(request_id) + + def apply(self): + """Upgrade controller firmware.""" + self.check_system_health() + + # Verify firmware compatibility and whether changes are required + if self.is_embedded(): + self.embedded_check_compatibility() + else: + self.proxy_check_upgrade_required() + + # This will upload the firmware files to the web services proxy but not to the controller + if self.upgrade_required: + self.proxy_upload_and_check_compatibility() + + # Perform upgrade + if self.upgrade_required and not self.module.check_mode: + if self.is_embedded(): + self.embedded_upgrade() + else: + self.proxy_upgrade() + + self.module.exit_json(changed=self.upgrade_required, upgrade_in_process=self.upgrade_in_progress, status=self.module_info) + + +def main(): + firmware = NetAppESeriesFirmware() + firmware.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_flashcache.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_flashcache.py new file mode 100644 index 000000000..3ffacedda --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_flashcache.py @@ -0,0 +1,442 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: netapp_e_flashcache +author: Kevin Hulquest (@hulquest) +version_added: '2.2' +short_description: NetApp E-Series manage SSD caches +description: +- Create or remove SSD caches on a NetApp E-Series storage array. +options: + api_username: + required: true + type: str + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_password: + required: true + type: str + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + api_url: + required: true + type: str + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + type: bool + ssid: + required: true + type: str + description: + - The ID of the array to manage (as configured on the web services proxy). + state: + required: true + type: str + description: + - Whether the specified SSD cache should exist or not. + choices: ['present', 'absent'] + default: present + name: + required: true + type: str + description: + - The name of the SSD cache to manage + io_type: + description: + - The type of workload to optimize the cache for. + choices: ['filesystem','database','media'] + default: filesystem + type: str + disk_count: + type: int + description: + - The minimum number of disks to use for building the cache. The cache will be expanded if this number exceeds the number of disks already in place + disk_refs: + description: + - List of disk references + type: list + size_unit: + description: + - The unit to be applied to size arguments + choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] + default: gb + type: str + cache_size_min: + description: + - The minimum size (in size_units) of the ssd cache. 
The cache will be expanded if this exceeds the current size of the cache. + type: int + criteria_disk_phy_type: + description: + - Type of physical disk + choices: ['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'] + type: str + log_mode: + type: str + description: + - Log mode + log_path: + type: str + description: + - Log path +''' + +EXAMPLES = """ + - name: Flash Cache + netapp_e_flashcache: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + name: SSDCacheBuiltByAnsible +""" + +RETURN = """ +msg: + description: Success message + returned: success + type: str + sample: json for newly created flash cache +""" +import json +import logging +import sys +import traceback + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.six.moves import reduce +from ansible.module_utils.six.moves.urllib.error import HTTPError +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import open_url + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError as err: + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except Exception: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +class NetAppESeriesFlashCache(object): + def __init__(self): + self.name = None + self.log_mode = None + self.log_path = None + self.api_url = None + self.api_username = None + self.api_password = None + self.ssid = None + self.validate_certs = None + self.disk_count = None + self.size_unit = None + self.cache_size_min = None + self.io_type = None + self.driveRefs = None + self.state = None + self._size_unit_map = dict( + bytes=1, + b=1, + kb=1024, + mb=1024 ** 2, + gb=1024 ** 3, + tb=1024 ** 4, + pb=1024 ** 5, + eb=1024 ** 6, + zb=1024 ** 7, + yb=1024 ** 8 + ) + + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_username=dict(type='str', required=True), + api_password=dict(type='str', required=True, no_log=True), + api_url=dict(type='str', required=True), + state=dict(default='present', choices=['present', 'absent'], type='str'), + ssid=dict(required=True, type='str'), + name=dict(required=True, type='str'), + disk_count=dict(type='int'), + disk_refs=dict(type='list'), + cache_size_min=dict(type='int'), + io_type=dict(default='filesystem', choices=['filesystem', 'database', 'media']), + size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'], + type='str'), + criteria_disk_phy_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'], + type='str'), + log_mode=dict(type='str'), + log_path=dict(type='str'), + )) + self.module = AnsibleModule( 
+ argument_spec=argument_spec, + required_if=[ + + ], + mutually_exclusive=[ + + ], + # TODO: update validation for various selection criteria + supports_check_mode=True + ) + + self.__dict__.update(self.module.params) + + # logging setup + self._logger = logging.getLogger(self.__class__.__name__) + self.debug = self._logger.debug + + if self.log_mode == 'file' and self.log_path: + logging.basicConfig(level=logging.DEBUG, filename=self.log_path) + elif self.log_mode == 'stderr': + logging.basicConfig(level=logging.DEBUG, stream=sys.stderr) + + self.post_headers = dict(Accept="application/json") + self.post_headers['Content-Type'] = 'application/json' + + def get_candidate_disks(self, disk_count, size_unit='gb', capacity=None): + self.debug("getting candidate disks...") + + drives_req = dict( + driveCount=disk_count, + sizeUnit=size_unit, + driveType='ssd', + ) + + if capacity: + drives_req['targetUsableCapacity'] = capacity + + (rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), + data=json.dumps(drives_req), headers=self.post_headers, method='POST', + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs) + + if rc == 204: + self.module.fail_json(msg='Cannot find disks to match requested criteria for ssd cache') + + disk_ids = [d['id'] for d in drives_resp] + bytes = reduce(lambda s, d: s + int(d['usableCapacity']), drives_resp, 0) + + return (disk_ids, bytes) + + def create_cache(self): + (disk_ids, bytes) = self.get_candidate_disks(disk_count=self.disk_count, size_unit=self.size_unit, + capacity=self.cache_size_min) + + self.debug("creating ssd cache...") + + create_fc_req = dict( + driveRefs=disk_ids, + name=self.name + ) + + (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid), + data=json.dumps(create_fc_req), headers=self.post_headers, method='POST', + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs) + + def update_cache(self): + self.debug('updating flash cache config...') + update_fc_req = dict( + name=self.name, + configType=self.io_type + ) + + (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/configure" % (self.ssid), + data=json.dumps(update_fc_req), headers=self.post_headers, method='POST', + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs) + + def delete_cache(self): + self.debug('deleting flash cache...') + (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid), method='DELETE', + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs, ignore_errors=True) + + @property + def needs_more_disks(self): + if len(self.cache_detail['driveRefs']) < self.disk_count: + self.debug("needs resize: current disk count %s < requested count %s", + len(self.cache_detail['driveRefs']), self.disk_count) + return True + + @property + def needs_less_disks(self): + if len(self.cache_detail['driveRefs']) > self.disk_count: + self.debug("needs resize: current disk count %s > requested count %s", + len(self.cache_detail['driveRefs']), self.disk_count) + return True + + @property + def current_size_bytes(self): + return int(self.cache_detail['fcDriveInfo']['fcWithDrives']['usedCapacity']) + + @property + def requested_size_bytes(self): + if self.cache_size_min: + return self.cache_size_min * self._size_unit_map[self.size_unit] + else: + return 0 + + @property
+ def needs_more_capacity(self): + if self.current_size_bytes < self.requested_size_bytes: + self.debug("needs resize: current capacity %sb is less than requested minimum %sb", + self.current_size_bytes, self.requested_size_bytes) + return True + + @property + def needs_resize(self): + return self.needs_more_disks or self.needs_more_capacity or self.needs_less_disks + + def resize_cache(self): + # increase up to disk count first, then iteratively add disks until we meet requested capacity + + # TODO: perform this calculation in check mode + current_disk_count = len(self.cache_detail['driveRefs']) + proposed_new_disks = 0 + + proposed_additional_bytes = 0 + proposed_disk_ids = [] + + if self.needs_more_disks: + proposed_disk_count = self.disk_count - current_disk_count + + (disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_disk_count) + proposed_additional_bytes = bytes + proposed_disk_ids = disk_ids + + while self.current_size_bytes + proposed_additional_bytes < self.requested_size_bytes: + proposed_new_disks += 1 + (disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_new_disks) + proposed_disk_ids = disk_ids + proposed_additional_bytes = bytes + + add_drives_req = dict( + driveRef=proposed_disk_ids + ) + + self.debug("adding drives to flash-cache...") + (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/addDrives" % (self.ssid), + data=json.dumps(add_drives_req), headers=self.post_headers, method='POST', + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs) + + elif self.needs_less_disks and self.driveRefs: + rm_drives = dict(driveRef=self.driveRefs) + (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/removeDrives" % (self.ssid), + data=json.dumps(rm_drives), headers=self.post_headers, method='POST', + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs) + + def apply(self): + result = dict(changed=False) + (rc, cache_resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid), + url_username=self.api_username, url_password=self.api_password, + validate_certs=self.validate_certs, ignore_errors=True) + + if rc == 200: + self.cache_detail = cache_resp + else: + self.cache_detail = None + + if rc not in [200, 404]: + raise Exception( + "Unexpected error code %s fetching flash cache detail. 
Response data was %s" % (rc, cache_resp)) + + if self.state == 'present': + if self.cache_detail: + # TODO: verify parameters against detail for changes + if self.cache_detail['name'] != self.name: + self.debug("CHANGED: name differs") + result['changed'] = True + if self.cache_detail['flashCacheBase']['configType'] != self.io_type: + self.debug("CHANGED: io_type differs") + result['changed'] = True + if self.needs_resize: + self.debug("CHANGED: resize required") + result['changed'] = True + else: + self.debug("CHANGED: requested state is 'present' but cache does not exist") + result['changed'] = True + else: # requested state is absent + if self.cache_detail: + self.debug("CHANGED: requested state is 'absent' but cache exists") + result['changed'] = True + + if not result['changed']: + self.debug("no changes, exiting...") + self.module.exit_json(**result) + + if self.module.check_mode: + self.debug("changes pending in check mode, exiting early...") + self.module.exit_json(**result) + + if self.state == 'present': + if not self.cache_detail: + self.create_cache() + else: + if self.needs_resize: + self.resize_cache() + + # run update here as well, since io_type can't be set on creation + self.update_cache() + + elif self.state == 'absent': + self.delete_cache() + + # TODO: include other details about the storage pool (size, type, id, etc) + self.module.exit_json(changed=result['changed'], **self.resp) + + +def main(): + sp = NetAppESeriesFlashCache() + try: + sp.apply() + except Exception as e: + sp.debug("Exception in apply(): \n%s", to_native(e)) + sp.module.fail_json(msg="Failed to create flash cache. Error[%s]" % to_native(e), + exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_global.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_global.py new file mode 100644 index 000000000..1284b2891 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_global.py @@ -0,0 +1,159 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = """ +--- +module: netapp_e_global +short_description: NetApp E-Series manage global settings configuration +description: + - Allow the user to configure several of the global settings associated with an E-Series storage-system +version_added: '2.7' +author: Michael Price (@lmprice) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.netapp.eseries +options: + name: + description: + - Set the name of the E-Series storage-system + - This label/name doesn't have to be unique. + - May be up to 30 characters in length. + type: str + aliases: + - label + log_path: + description: + - A local path to a file to be used for debug logging + required: no + type: str +notes: + - Check mode is supported. + - This module requires Web Services API v1.3 or newer. +""" + +EXAMPLES = """ + - name: Set the storage-system name + netapp_e_global: + name: myArrayName + api_url: "10.1.1.1:8443" + api_username: "admin" + api_password: "myPass" +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The settings have been updated. 
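+# Hypothetical usage sketch (illustrative task layout and values, not part of the
+# upstream module docs): register the result and read back the returned name.
+#   - netapp_e_global:
+#       ssid: "1"
+#       api_url: "10.1.1.1:8443"
+#       api_username: "admin"
+#       api_password: "myPass"
+#       name: myArrayName
+#     register: global_result
+#   - debug:
+#       msg: "Array is now labeled {{ global_result.name }}"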
+name: + description: + - The current name/label of the storage-system. + returned: on success + sample: myArrayName + type: str +""" +import json +import logging + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec +from ansible.module_utils._text import to_native + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +class GlobalSettings(object): + def __init__(self): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + name=dict(type='str', required=False, aliases=['label']), + log_path=dict(type='str', required=False), + )) + + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, ) + args = self.module.params + self.name = args['name'] + + self.ssid = args['ssid'] + self.url = args['api_url'] + self.creds = dict(url_password=args['api_password'], + validate_certs=args['validate_certs'], + url_username=args['api_username'], ) + + self.check_mode = self.module.check_mode + + log_path = args['log_path'] + + # logging setup + self._logger = logging.getLogger(self.__class__.__name__) + + if log_path: + logging.basicConfig( + level=logging.DEBUG, filename=log_path, filemode='w', + format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s') + + if not self.url.endswith('/'): + self.url += '/' + + if self.name and len(self.name) > 30: + self.module.fail_json(msg="The provided name is invalid; it must be 30 characters or fewer in length.") + + def get_name(self): + try: + (rc, result) = request(self.url + 'storage-systems/%s' % self.ssid, headers=HEADERS, **self.creds) + if result['status'] in ['offline', 'neverContacted']: + self.module.fail_json(msg="This storage-system is offline! Array Id [%s]." % (self.ssid)) + return result['name'] + except Exception as err: + self.module.fail_json(msg="Connection failure! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + def update_name(self): + name = self.get_name() + update = False + if self.name != name: + update = True + + body = dict(name=self.name) + + if update and not self.check_mode: + try: + (rc, result) = request(self.url + 'storage-systems/%s/configuration' % self.ssid, method='POST', + data=json.dumps(body), headers=HEADERS, **self.creds) + self._logger.info("Set name to %s.", result['name']) + # This is going to catch cases like a connection failure + except Exception as err: + self.module.fail_json( + msg="Failed to set the storage-system name! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + return update + + def update(self): + update = self.update_name() + name = self.get_name() + + self.module.exit_json(msg="The requested settings have been updated.", changed=update, name=name) + + def __call__(self, *args, **kwargs): + self.update() + + +def main(): + settings = GlobalSettings() + settings() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_host.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_host.py new file mode 100644 index 000000000..699087f6c --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_host.py @@ -0,0 +1,544 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2018, NetApp Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = """ +--- +module: netapp_e_host +short_description: NetApp E-Series manage eseries hosts +description: Create, update, remove hosts on NetApp E-series storage arrays +version_added: '2.2' +author: + - Kevin Hulquest (@hulquest) + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.netapp.eseries +options: + name: + description: + - If the host doesn't yet exist, the label/name to assign at creation time. + - If the hosts already exists, this will be used to uniquely identify the host to make any required changes + required: True + type: str + aliases: + - label + state: + description: + - Set to absent to remove an existing host + - Set to present to modify or create a new host definition + choices: + - absent + - present + default: present + type: str + version_added: 2.7 + host_type: + description: + - This is the type of host to be mapped + - Required when C(state=present) + - Either one of the following names can be specified, Linux DM-MP, VMWare, Windows, Windows Clustered, or a + host type index which can be found in M(netapp_e_facts) + type: str + aliases: + - host_type_index + ports: + description: + - A list of host ports you wish to associate with the host. + - Host ports are uniquely identified by their WWN or IQN. Their assignments to a particular host are + uniquely identified by a label and these must be unique. + required: False + type: list + suboptions: + type: + description: + - The interface type of the port to define. + - Acceptable choices depend on the capabilities of the target hardware/software platform. + required: true + choices: + - iscsi + - sas + - fc + - ib + - nvmeof + - ethernet + label: + description: + - A unique label to assign to this port assignment. + required: true + port: + description: + - The WWN or IQN of the hostPort to assign to this port definition. + required: true + force_port: + description: + - Allow ports that are already assigned to be re-assigned to your current host + required: false + type: bool + version_added: 2.7 + group: + description: + - The unique identifier of the host-group you want the host to be a member of; this is used for clustering. + required: False + type: str + aliases: + - cluster + log_path: + description: + - A local path to a file to be used for debug logging + required: False + type: str + version_added: 2.7 +""" + +EXAMPLES = """ + - name: Define or update an existing host named 'Host1' + netapp_e_host: + ssid: "1" + api_url: "10.113.1.101:8443" + api_username: admin + api_password: myPassword + name: "Host1" + state: present + host_type_index: Linux DM-MP + ports: + - type: 'iscsi' + label: 'PORT_1' + port: 'iqn.1996-04.de.suse:01:56f86f9bd1fe' + - type: 'fc' + label: 'FC_1' + port: '10:00:FF:7C:FF:FF:FF:01' + - type: 'fc' + label: 'FC_2' + port: '10:00:FF:7C:FF:FF:FF:00' + + - name: Ensure a host named 'Host2' doesn't exist + netapp_e_host: + ssid: "1" + api_url: "10.113.1.101:8443" + api_username: admin + api_password: myPassword + name: "Host2" + state: absent +""" + +RETURN = """ +msg: + description: + - A user-readable description of the actions performed. + returned: on success + type: str + sample: The host has been created. 
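+# Hypothetical shape of a successful result, assembled from the samples documented
+# in this RETURN block (values are illustrative, not from a live array):
+#   {"changed": true, "msg": "The host has been created.",
+#    "id": "00000000600A098000AAC0C3003004700AD86A52", "ssid": "1",
+#    "api_url": "https://webservices.example.com:8443"}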
+id: + description: + - the unique identifier of the host on the E-Series storage-system + returned: on success when state=present + type: str + sample: 00000000600A098000AAC0C3003004700AD86A52 + version_added: "2.6" + +ssid: + description: + - the unique identifier of the E-Series storage-system with the current api + returned: on success + type: str + sample: 1 + version_added: "2.6" + +api_url: + description: + - the url of the API that this request was processed by + returned: on success + type: str + sample: https://webservices.example.com:8443 + version_added: "2.6" +""" +import json +import logging +import re +from pprint import pformat + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec +from ansible.module_utils._text import to_native + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +class Host(object): + HOST_TYPE_INDEXES = {"linux dm-mp": 28, "vmware": 10, "windows": 1, "windows clustered": 8} + + def __init__(self): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + state=dict(type='str', default='present', choices=['absent', 'present']), + group=dict(type='str', required=False, aliases=['cluster']), + ports=dict(type='list', required=False), + force_port=dict(type='bool', default=False), + name=dict(type='str', required=True, aliases=['label']), + host_type=dict(type='str', aliases=['host_type_index']), + log_path=dict(type='str', required=False), + )) + + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + self.check_mode = self.module.check_mode + args = self.module.params + self.group = args['group'] + self.ports = args['ports'] + self.force_port = args['force_port'] + self.name = args['name'] + self.state = args['state'] + self.ssid = args['ssid'] + self.url = args['api_url'] + self.user = args['api_username'] + self.pwd = args['api_password'] + self.certs = args['validate_certs'] + + self.post_body = dict() + self.all_hosts = list() + self.host_obj = dict() + self.newPorts = list() + self.portsForUpdate = list() + self.portsForRemoval = list() + + # Update host type with the corresponding index + host_type = args['host_type_index'] + if host_type: + host_type = host_type.lower() + if host_type in [key.lower() for key in list(self.HOST_TYPE_INDEXES.keys())]: + self.host_type_index = self.HOST_TYPE_INDEXES[host_type] + elif host_type.isdigit(): + self.host_type_index = int(args['host_type_index']) + else: + self.module.fail_json(msg="host_type must be either a host type name or a host type index found in" + " the documentation.") + else: + self.host_type_index = None + + # logging setup + self._logger = logging.getLogger(self.__class__.__name__) + if args['log_path']: + logging.basicConfig( + level=logging.DEBUG, filename=args['log_path'], filemode='w', + format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s') + + if not self.url.endswith('/'): + self.url += '/' + + # Ensure when state==present then host_type_index is defined + if self.state == "present" and self.host_type_index is None: + self.module.fail_json(msg="host_type_index is required when state=='present'.
Array Id: [%s]" % self.ssid) + + # Fix port representation if they are provided with colons + if self.ports is not None: + for port in self.ports: + port['label'] = port['label'].lower() + port['type'] = port['type'].lower() + port['port'] = port['port'].lower() + + # Determine whether address is 16-byte WWPN and, if so, remove + if re.match(r'^(0x)?[0-9a-f]{16}$', port['port'].replace(':', '')): + port['port'] = port['port'].replace(':', '').replace('0x', '') + + def valid_host_type(self): + host_types = None + try: + (rc, host_types) = request(self.url + 'storage-systems/%s/host-types' % self.ssid, url_password=self.pwd, + url_username=self.user, validate_certs=self.certs, headers=HEADERS) + except Exception as err: + self.module.fail_json( + msg="Failed to get host types. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + try: + match = list(filter(lambda host_type: host_type['index'] == self.host_type_index, host_types))[0] + return True + except IndexError: + self.module.fail_json(msg="There is no host type with index %s" % self.host_type_index) + + def assigned_host_ports(self, apply_unassigning=False): + """Determine if the hostPorts requested have already been assigned and return list of required used ports.""" + used_host_ports = {} + for host in self.all_hosts: + if host['label'] != self.name: + for host_port in host['hostSidePorts']: + for port in self.ports: + if port['port'] == host_port["address"] or port['label'] == host_port['label']: + if not self.force_port: + self.module.fail_json(msg="There are no host ports available OR there are not enough" + " unassigned host ports") + else: + # Determine port reference + port_ref = [port["hostPortRef"] for port in host["ports"] + if port["hostPortName"] == host_port["address"]] + port_ref.extend([port["initiatorRef"] for port in host["initiators"] + if port["nodeName"]["iscsiNodeName"] == host_port["address"]]) + + # Create dictionary of hosts containing list of port references + if host["hostRef"] not in used_host_ports.keys(): + used_host_ports.update({host["hostRef"]: port_ref}) + else: + used_host_ports[host["hostRef"]].extend(port_ref) + else: + for host_port in host['hostSidePorts']: + for port in self.ports: + if ((host_port['label'] == port['label'] and host_port['address'] != port['port']) or + (host_port['label'] != port['label'] and host_port['address'] == port['port'])): + if not self.force_port: + self.module.fail_json(msg="There are no host ports available OR there are not enough" + " unassigned host ports") + else: + # Determine port reference + port_ref = [port["hostPortRef"] for port in host["ports"] + if port["hostPortName"] == host_port["address"]] + port_ref.extend([port["initiatorRef"] for port in host["initiators"] + if port["nodeName"]["iscsiNodeName"] == host_port["address"]]) + + # Create dictionary of hosts containing list of port references + if host["hostRef"] not in used_host_ports.keys(): + used_host_ports.update({host["hostRef"]: port_ref}) + else: + used_host_ports[host["hostRef"]].extend(port_ref) + + # Unassign assigned ports + if apply_unassigning: + for host_ref in used_host_ports.keys(): + try: + rc, resp = request(self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, host_ref), + url_username=self.user, url_password=self.pwd, headers=HEADERS, + validate_certs=self.certs, method='POST', + data=json.dumps({"portsToRemove": used_host_ports[host_ref]})) + except Exception as err: + self.module.fail_json(msg="Failed to unassign host port. Host Id [%s]. Array Id [%s]. Ports [%s]." 
+ " Error [%s]." % (self.host_obj['id'], self.ssid, + used_host_ports[host_ref], to_native(err))) + + return used_host_ports + + def group_id(self): + if self.group: + try: + (rc, all_groups) = request(self.url + 'storage-systems/%s/host-groups' % self.ssid, + url_password=self.pwd, + url_username=self.user, validate_certs=self.certs, headers=HEADERS) + except Exception as err: + self.module.fail_json( + msg="Failed to get host groups. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + try: + group_obj = list(filter(lambda group: group['name'] == self.group, all_groups))[0] + return group_obj['id'] + except IndexError: + self.module.fail_json(msg="No group with the name: %s exists" % self.group) + else: + # Return the value equivalent of no group + return "0000000000000000000000000000000000000000" + + def host_exists(self): + """Determine if the requested host exists + As a side effect, set the full list of defined hosts in 'all_hosts', and the target host in 'host_obj'. + """ + match = False + all_hosts = list() + + try: + (rc, all_hosts) = request(self.url + 'storage-systems/%s/hosts' % self.ssid, url_password=self.pwd, + url_username=self.user, validate_certs=self.certs, headers=HEADERS) + except Exception as err: + self.module.fail_json( + msg="Failed to determine host existence. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + # Augment the host objects + for host in all_hosts: + for port in host['hostSidePorts']: + port['type'] = port['type'].lower() + port['address'] = port['address'].lower() + port['label'] = port['label'].lower() + + # Augment hostSidePorts with their ID (this is an omission in the API) + ports = dict((port['label'], port['id']) for port in host['ports']) + ports.update((port['label'], port['id']) for port in host['initiators']) + + for host_side_port in host['hostSidePorts']: + if host_side_port['label'] in ports: + host_side_port['id'] = ports[host_side_port['label']] + + if host['label'] == self.name: + self.host_obj = host + match = True + + self.all_hosts = all_hosts + return match + + def needs_update(self): + """Determine whether we need to update the Host object + As a side effect, we will set the ports that we need to update (portsForUpdate), and the ports we need to add + (newPorts), on self. + """ + changed = False + if (self.host_obj["clusterRef"].lower() != self.group_id().lower() or + self.host_obj["hostTypeIndex"] != self.host_type_index): + self._logger.info("Either hostType or the clusterRef doesn't match, an update is required.") + changed = True + current_host_ports = dict((port["id"], {"type": port["type"], "port": port["address"], "label": port["label"]}) + for port in self.host_obj["hostSidePorts"]) + + if self.ports: + for port in self.ports: + for current_host_port_id in current_host_ports.keys(): + if port == current_host_ports[current_host_port_id]: + current_host_ports.pop(current_host_port_id) + break + elif port["port"] == current_host_ports[current_host_port_id]["port"]: + if self.port_on_diff_host(port) and not self.force_port: + self.module.fail_json(msg="The port you specified [%s] is associated with a different host." 
+ " Specify force_port as True or try a different port spec" % port) + + if (port["label"] != current_host_ports[current_host_port_id]["label"] or + port["type"] != current_host_ports[current_host_port_id]["type"]): + current_host_ports.pop(current_host_port_id) + self.portsForUpdate.append({"portRef": current_host_port_id, "port": port["port"], + "label": port["label"], "hostRef": self.host_obj["hostRef"]}) + break + else: + self.newPorts.append(port) + + self.portsForRemoval = list(current_host_ports.keys()) + changed = any([self.newPorts, self.portsForUpdate, self.portsForRemoval, changed]) + + return changed + + def port_on_diff_host(self, arg_port): + """ Checks to see if a passed in port arg is present on a different host """ + for host in self.all_hosts: + # Only check 'other' hosts + if host['name'] != self.name: + for port in host['hostSidePorts']: + # Check if the port label is found in the port dict list of each host + if arg_port['label'] == port['label'] or arg_port['port'] == port['address']: + self.other_host = host + return True + return False + + def update_host(self): + self._logger.info("Beginning the update for host=%s.", self.name) + + if self.ports: + + # Remove ports that need reassigning from their current host. + self.assigned_host_ports(apply_unassigning=True) + + self.post_body["portsToUpdate"] = self.portsForUpdate + self.post_body["ports"] = self.newPorts + self._logger.info("Requested ports: %s", pformat(self.ports)) + else: + self._logger.info("No host ports were defined.") + + if self.group: + self.post_body['groupId'] = self.group_id() + + self.post_body['hostType'] = dict(index=self.host_type_index) + + api = self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id']) + self._logger.info("POST => url=%s, body=%s.", api, pformat(self.post_body)) + + if not self.check_mode: + try: + (rc, self.host_obj) = request(api, url_username=self.user, url_password=self.pwd, headers=HEADERS, + validate_certs=self.certs, method='POST', data=json.dumps(self.post_body)) + except Exception as err: + self.module.fail_json( + msg="Failed to update host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + + payload = self.build_success_payload(self.host_obj) + self.module.exit_json(changed=True, **payload) + + def create_host(self): + self._logger.info("Creating host definition.") + + # Remove ports that need reassigning from their current host. + self.assigned_host_ports(apply_unassigning=True) + + # needs_reassignment = False + post_body = dict( + name=self.name, + hostType=dict(index=self.host_type_index), + groupId=self.group_id(), + ) + + if self.ports: + post_body.update(ports=self.ports) + + api = self.url + "storage-systems/%s/hosts" % self.ssid + self._logger.info('POST => url=%s, body=%s', api, pformat(post_body)) + + if not self.check_mode: + if not self.host_exists(): + try: + (rc, self.host_obj) = request(api, method='POST', url_username=self.user, url_password=self.pwd, validate_certs=self.certs, + data=json.dumps(post_body), headers=HEADERS) + except Exception as err: + self.module.fail_json( + msg="Failed to create host. Array Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + else: + payload = self.build_success_payload(self.host_obj) + self.module.exit_json(changed=False, msg="Host already exists. Id [%s]. Host [%s]." 
% (self.ssid, self.name), **payload) + + payload = self.build_success_payload(self.host_obj) + self.module.exit_json(changed=True, msg='Host created.', **payload) + + def remove_host(self): + try: + (rc, resp) = request(self.url + "storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj['id']), + method='DELETE', + url_username=self.user, url_password=self.pwd, validate_certs=self.certs) + except Exception as err: + self.module.fail_json( + msg="Failed to remove host. Host[%s]. Array Id [%s]. Error [%s]." % (self.host_obj['id'], + self.ssid, + to_native(err))) + + def build_success_payload(self, host=None): + keys = ['id'] + if host is not None: + result = dict((key, host[key]) for key in keys) + else: + result = dict() + result['ssid'] = self.ssid + result['api_url'] = self.url + return result + + def apply(self): + if self.state == 'present': + if self.host_exists(): + if self.needs_update() and self.valid_host_type(): + self.update_host() + else: + payload = self.build_success_payload(self.host_obj) + self.module.exit_json(changed=False, msg="Host already present; no changes required.", **payload) + elif self.valid_host_type(): + self.create_host() + else: + payload = self.build_success_payload() + if self.host_exists(): + self.remove_host() + self.module.exit_json(changed=True, msg="Host removed.", **payload) + else: + self.module.exit_json(changed=False, msg="Host already absent.", **payload) + + +def main(): + host = Host() + host.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_hostgroup.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_hostgroup.py new file mode 100644 index 000000000..87676106f --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_hostgroup.py @@ -0,0 +1,307 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {"metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community"} + + +DOCUMENTATION = """ +--- +module: netapp_e_hostgroup +version_added: "2.2" +short_description: NetApp E-Series manage array host groups +author: + - Kevin Hulquest (@hulquest) + - Nathan Swartz (@ndswartz) +description: Create, update or destroy host groups on a NetApp E-Series storage array. +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.netapp.eseries +options: + state: + required: true + description: + - Whether the specified host group should exist or not. + type: str + choices: ["present", "absent"] + name: + required: false + description: + - Name of the host group to manage + - This option is mutually exclusive with I(id). + type: str + new_name: + required: false + description: + - Specify this when you need to update the name of a host group + type: str + id: + required: false + description: + - Host reference identifier for the host group to manage. + - This option is mutually exclusive with I(name). 
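+# Illustrative sketch (hypothetical values): a host group can be addressed by
+# either I(name) (e.g. name: MyHostGroup) or I(id)
+# (e.g. id: "3233343536373839303132333100000000000000"), but never both in the
+# same task, since the two options are mutually exclusive.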
+ type: str + hosts: + required: false + description: + - List of host names/labels to add to the group + type: list +""" +EXAMPLES = """ + - name: Configure Hostgroup + netapp_e_hostgroup: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + state: present +""" +RETURN = """ +clusterRef: + description: The unique identification value for this object. Other objects may use this reference value to refer to the cluster. + returned: always except when state is absent + type: str + sample: "3233343536373839303132333100000000000000" +confirmLUNMappingCreation: + description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping + will alter the volume access rights of other clusters, in addition to this one. + returned: always + type: bool + sample: false +hosts: + description: A list of the hosts that are part of the host group after all operations. + returned: always except when state is absent + type: list + sample: ["HostA","HostB"] +id: + description: The id number of the hostgroup. + returned: always except when state is absent + type: str + sample: "3233343536373839303132333100000000000000" +isSAControlled: + description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false, + indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings. + returned: always except when state is absent + type: bool + sample: false +label: + description: The user-assigned, descriptive label string for the cluster. + returned: always + type: str + sample: "MyHostGroup" +name: + description: Same as label. + returned: always except when state is absent + type: str + sample: "MyHostGroup" +protectionInformationCapableAccessMethod: + description: This field is true if the host has a PI capable access method. + returned: always except when state is absent + type: bool + sample: true +""" + +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule +from ansible.module_utils._text import to_native + + +class NetAppESeriesHostGroup(NetAppESeriesModule): + EXPANSION_TIMEOUT_SEC = 10 + DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11 + + def __init__(self): + version = "02.00.0000.0000" + ansible_options = dict( + state=dict(required=True, choices=["present", "absent"], type="str"), + name=dict(required=False, type="str"), + new_name=dict(required=False, type="str"), + id=dict(required=False, type="str"), + hosts=dict(required=False, type="list")) + mutually_exclusive = [["name", "id"]] + super(NetAppESeriesHostGroup, self).__init__(ansible_options=ansible_options, + web_services_version=version, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive) + + args = self.module.params + self.state = args["state"] + self.name = args["name"] + self.new_name = args["new_name"] + self.id = args["id"] + self.hosts_list = args["hosts"] + + self.current_host_group = None + + @property + def hosts(self): + """Retrieve a list of host reference identifiers that should be associated with the host group.""" + host_list = [] + existing_hosts = [] + + if self.hosts_list: + try: + rc, existing_hosts = self.request("storage-systems/%s/hosts" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve hosts information.
Array id [%s]. Error[%s]." + % (self.ssid, to_native(error))) + + for host in self.hosts_list: + for existing_host in existing_hosts: + if host in existing_host["id"] or host in existing_host["name"]: + host_list.append(existing_host["id"]) + break + else: + self.module.fail_json(msg="Expected host does not exist. Array id [%s]. Host [%s]." + % (self.ssid, host)) + + return host_list + + @property + def host_groups(self): + """Retrieve a list of existing host groups.""" + host_groups = [] + hosts = [] + try: + rc, host_groups = self.request("storage-systems/%s/host-groups" % self.ssid) + rc, hosts = self.request("storage-systems/%s/hosts" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve host group information. Array id [%s]. Error[%s]." + % (self.ssid, to_native(error))) + + host_groups = [{"id": group["clusterRef"], "name": group["name"]} for group in host_groups] + for group in host_groups: + hosts_ids = [] + for host in hosts: + if group["id"] == host["clusterRef"]: + hosts_ids.append(host["hostRef"]) + group.update({"hosts": hosts_ids}) + + return host_groups + + @property + def current_hosts_in_host_group(self): + """Retrieve the current hosts associated with the current hostgroup.""" + current_hosts = [] + for group in self.host_groups: + if (self.name and group["name"] == self.name) or (self.id and group["id"] == self.id): + current_hosts = group["hosts"] + + return current_hosts + + def unassign_hosts(self, host_list=None): + """Unassign hosts from host group.""" + if host_list is None: + host_list = self.current_host_group["hosts"] + + for host_id in host_list: + try: + rc, resp = self.request("storage-systems/%s/hosts/%s/move" % (self.ssid, host_id), + method="POST", data={"group": "0000000000000000000000000000000000000000"}) + except Exception as error: + self.module.fail_json(msg="Failed to unassign hosts from host group. Array id [%s]. Host id [%s]." + " Error[%s]." % (self.ssid, host_id, to_native(error))) + + def delete_host_group(self, unassign_hosts=True): + """Delete host group.""" + if unassign_hosts: + self.unassign_hosts() + + try: + rc, resp = self.request("storage-systems/%s/host-groups/%s" % (self.ssid, self.current_host_group["id"]), + method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to delete host group. Array id [%s]. Error[%s]." + % (self.ssid, to_native(error))) + + def create_host_group(self): + """Create host group.""" + data = {"name": self.name, "hosts": self.hosts} + + response = None + try: + rc, response = self.request("storage-systems/%s/host-groups" % self.ssid, method="POST", data=data) + except Exception as error: + self.module.fail_json(msg="Failed to create host group. Array id [%s]. Error[%s]." + % (self.ssid, to_native(error))) + + return response + + def update_host_group(self): + """Update host group.""" + data = {"name": self.new_name if self.new_name else self.name, + "hosts": self.hosts} + + # unassign hosts that should not be part of the hostgroup + desired_host_ids = self.hosts + for host in self.current_hosts_in_host_group: + if host not in desired_host_ids: + self.unassign_hosts([host]) + + update_response = None + try: + rc, update_response = self.request("storage-systems/%s/host-groups/%s" + % (self.ssid, self.current_host_group["id"]), method="POST", data=data) + except Exception as error: + self.module.fail_json(msg="Failed to update host group. Array id [%s]. Error[%s]."
+ % (self.ssid, to_native(error))) + + return update_response + + def apply(self): + """Apply desired host group state to the storage array.""" + changes_required = False + + # Search for existing host group match + for group in self.host_groups: + if (self.id and group["id"] == self.id) or (self.name and group["name"] == self.name): + self.current_host_group = group + + # Determine whether changes are required + if self.state == "present": + if self.current_host_group: + if (self.new_name and self.new_name != self.name) or self.hosts != self.current_host_group["hosts"]: + changes_required = True + else: + if not self.name: + self.module.fail_json(msg="The option name must be supplied when creating a new host group." + " Array id [%s]." % self.ssid) + changes_required = True + + elif self.current_host_group: + changes_required = True + + # Apply any necessary changes + msg = "No changes required." + if changes_required and not self.module.check_mode: + if self.state == "present": + if self.current_host_group: + if ((self.new_name and self.new_name != self.name) or + (self.hosts != self.current_host_group["hosts"])): + msg = self.update_host_group() + else: + msg = self.create_host_group() + + elif self.current_host_group: + self.delete_host_group() + msg = "Host group deleted. Array Id [%s]. Host Name [%s]. Host Id [%s]."\ + % (self.ssid, self.current_host_group["name"], self.current_host_group["id"]) + + self.module.exit_json(msg=msg, changed=changes_required) + + +def main(): + hostgroup = NetAppESeriesHostGroup() + hostgroup.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_interface.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_interface.py new file mode 100644 index 000000000..5e290f74e --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_interface.py @@ -0,0 +1,407 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = """ +--- +module: netapp_e_iscsi_interface +short_description: NetApp E-Series manage iSCSI interface configuration +description: + - Configure settings of an E-Series iSCSI interface +version_added: '2.7' +author: Michael Price (@lmprice) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.netapp.eseries +options: + controller: + description: + - The controller that owns the port you want to configure. + - Controller names are presented alphabetically, with the first controller as A, + the second as B, and so on. + - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard + limitation and could change in the future. + required: yes + type: str + choices: + - A + - B + name: + description: + - The channel of the port to modify the configuration of. + - The list of choices is not necessarily comprehensive. It depends on the number of ports + that are available in the system. + - The numerical value represents the number of the channel (typically from left to right on the HIC), + beginning with a value of 1.
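+# For example (hypothetical 4-port HIC): the left-most iSCSI port on controller A
+# is channel 1, the next one to its right is channel 2, and so on; controller B
+# numbers its own ports the same way, starting again at 1.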
+ required: yes + type: int + aliases: + - channel + state: + description: + - When enabled, the provided configuration will be utilized. + - When disabled, the IPv4 configuration will be cleared and IPv4 connectivity disabled. + choices: + - enabled + - disabled + default: enabled + type: str + address: + description: + - The IPv4 address to assign to the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + type: str + subnet_mask: + description: + - The subnet mask to utilize for the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + type: str + gateway: + description: + - The IPv4 gateway address to utilize for the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + type: str + config_method: + description: + - The configuration method type to use for this interface. + - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway). + choices: + - dhcp + - static + default: dhcp + type: str + mtu: + description: + - The maximum transmission units (MTU), in bytes. + - This allows you to configure a larger value for the MTU, in order to enable jumbo frames + (any value > 1500). + - Generally, it is necessary to have your host, switches, and other components not only support jumbo + frames, but also have it configured properly. Therefore, unless you know what you're doing, it's best to + leave this at the default. + default: 1500 + type: int + aliases: + - max_frame_size + log_path: + description: + - A local path to a file to be used for debug logging + type: str + required: no +notes: + - Check mode is supported. + - The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address + via dhcp, etc), can take seconds or minutes longer to take effect. + - This module will not be useful/usable on an E-Series system without any iSCSI interfaces. + - This module requires a Web Services API version of >= 1.3. +""" + +EXAMPLES = """ + - name: Configure the first port on the A controller with a static IPv4 address + netapp_e_iscsi_interface: + name: "1" + controller: "A" + config_method: static + address: "192.168.1.100" + subnet_mask: "255.255.255.0" + gateway: "192.168.1.1" + ssid: "1" + api_url: "10.1.1.1:8443" + api_username: "admin" + api_password: "myPass" + + - name: Disable ipv4 connectivity for the second port on the B controller + netapp_e_iscsi_interface: + name: "2" + controller: "B" + state: disabled + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + + - name: Enable jumbo frames for the first 4 ports on controller A + netapp_e_iscsi_interface: + name: "{{ item | int }}" + controller: "A" + state: enabled + mtu: 9000 + config_method: dhcp + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + loop: + - 1 + - 2 + - 3 + - 4 +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The interface settings have been updated. +enabled: + description: + - Indicates whether IPv4 connectivity has been enabled or disabled. + - This does not necessarily indicate connectivity. If dhcp was enabled without a dhcp server, for instance, + it is unlikely that the configuration will actually be valid. 
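+# e.g. a hypothetical run with config_method: dhcp and no reachable DHCP server
+# can still report enabled=True; treat this flag as configuration state rather
+# than proof of working connectivity.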
+    returned: on success
+    sample: True
+    type: bool
+"""
+import json
+import logging
+from pprint import pformat
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+    "Content-Type": "application/json",
+    "Accept": "application/json",
+}
+
+
+class IscsiInterface(object):
+    def __init__(self):
+        argument_spec = eseries_host_argument_spec()
+        argument_spec.update(dict(
+            controller=dict(type='str', required=True, choices=['A', 'B']),
+            name=dict(type='int', aliases=['channel']),
+            state=dict(type='str', required=False, default='enabled', choices=['enabled', 'disabled']),
+            address=dict(type='str', required=False),
+            subnet_mask=dict(type='str', required=False),
+            gateway=dict(type='str', required=False),
+            config_method=dict(type='str', required=False, default='dhcp', choices=['dhcp', 'static']),
+            mtu=dict(type='int', default=1500, required=False, aliases=['max_frame_size']),
+            log_path=dict(type='str', required=False),
+        ))
+
+        required_if = [
+            ["config_method", "static", ["address", "subnet_mask"]],
+        ]
+
+        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if, )
+        args = self.module.params
+        self.controller = args['controller']
+        self.name = args['name']
+        self.mtu = args['mtu']
+        self.state = args['state']
+        self.address = args['address']
+        self.subnet_mask = args['subnet_mask']
+        self.gateway = args['gateway']
+        self.config_method = args['config_method']
+
+        self.ssid = args['ssid']
+        self.url = args['api_url']
+        self.creds = dict(url_password=args['api_password'],
+                          validate_certs=args['validate_certs'],
+                          url_username=args['api_username'], )
+
+        self.check_mode = self.module.check_mode
+        self.post_body = dict()
+        self.controllers = list()
+
+        log_path = args['log_path']
+
+        # logging setup
+        self._logger = logging.getLogger(self.__class__.__name__)
+
+        if log_path:
+            logging.basicConfig(
+                level=logging.DEBUG, filename=log_path, filemode='w',
+                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
+
+        if not self.url.endswith('/'):
+            self.url += '/'
+
+        if self.mtu < 1500 or self.mtu > 9000:
+            self.module.fail_json(msg="The provided mtu is invalid, it must be between 1500 and 9000 bytes.")
+
+        if self.config_method == 'dhcp' and any([self.address, self.subnet_mask, self.gateway]):
+            self.module.fail_json(msg='A config_method of dhcp is mutually exclusive with the address,'
+                                      ' subnet_mask, and gateway options.')
+
+        # A relatively primitive regex to validate that the input is formatted like a valid ip address
+        address_regex = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
+
+        if self.address and not address_regex.match(self.address):
+            self.module.fail_json(msg="An invalid ip address was provided for address.")
+
+        if self.subnet_mask and not address_regex.match(self.subnet_mask):
+            self.module.fail_json(msg="An invalid ip address was provided for subnet_mask.")
+
+        if self.gateway and not address_regex.match(self.gateway):
+            self.module.fail_json(msg="An invalid ip address was provided for gateway.")
+
+    @property
+    def interfaces(self):
+        ifaces = list()
+        try:
+            (rc, ifaces) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/controller/hostInterfaces'
+                                   % self.ssid, headers=HEADERS, **self.creds)
+        except Exception as err:
+            self.module.fail_json(
+                msg="Failed to 
retrieve defined host interfaces. Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + # Filter out non-iSCSI interfaces + ifaces = [iface['iscsi'] for iface in ifaces if iface['interfaceType'] == 'iscsi'] + + return ifaces + + def get_controllers(self): + """Retrieve a mapping of controller labels to their references + { + 'A': '070000000000000000000001', + 'B': '070000000000000000000002', + } + :return: the controllers defined on the system + """ + controllers = list() + try: + (rc, controllers) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/controller/id' + % self.ssid, headers=HEADERS, **self.creds) + except Exception as err: + self.module.fail_json( + msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + controllers.sort() + + controllers_dict = {} + i = ord('A') + for controller in controllers: + label = chr(i) + controllers_dict[label] = controller + i += 1 + + return controllers_dict + + def fetch_target_interface(self): + interfaces = self.interfaces + + for iface in interfaces: + if iface['channel'] == self.name and self.controllers[self.controller] == iface['controllerId']: + return iface + + channels = sorted(set((str(iface['channel'])) for iface in interfaces + if self.controllers[self.controller] == iface['controllerId'])) + + self.module.fail_json(msg="The requested channel of %s is not valid. Valid channels include: %s." + % (self.name, ", ".join(channels))) + + def make_update_body(self, target_iface): + body = dict(iscsiInterface=target_iface['id']) + update_required = False + + self._logger.info("Requested state=%s.", self.state) + self._logger.info("config_method: current=%s, requested=%s", + target_iface['ipv4Data']['ipv4AddressConfigMethod'], self.config_method) + + if self.state == 'enabled': + settings = dict() + if not target_iface['ipv4Enabled']: + update_required = True + settings['ipv4Enabled'] = [True] + if self.mtu != target_iface['interfaceData']['ethernetData']['maximumFramePayloadSize']: + update_required = True + settings['maximumFramePayloadSize'] = [self.mtu] + if self.config_method == 'static': + ipv4Data = target_iface['ipv4Data']['ipv4AddressData'] + + if ipv4Data['ipv4Address'] != self.address: + update_required = True + settings['ipv4Address'] = [self.address] + if ipv4Data['ipv4SubnetMask'] != self.subnet_mask: + update_required = True + settings['ipv4SubnetMask'] = [self.subnet_mask] + if self.gateway is not None and ipv4Data['ipv4GatewayAddress'] != self.gateway: + update_required = True + settings['ipv4GatewayAddress'] = [self.gateway] + + if target_iface['ipv4Data']['ipv4AddressConfigMethod'] != 'configStatic': + update_required = True + settings['ipv4AddressConfigMethod'] = ['configStatic'] + + elif (target_iface['ipv4Data']['ipv4AddressConfigMethod'] != 'configDhcp'): + update_required = True + settings.update(dict(ipv4Enabled=[True], + ipv4AddressConfigMethod=['configDhcp'])) + body['settings'] = settings + + else: + if target_iface['ipv4Enabled']: + update_required = True + body['settings'] = dict(ipv4Enabled=[False]) + + self._logger.info("Update required ?=%s", update_required) + self._logger.info("Update body: %s", pformat(body)) + + return update_required, body + + def update(self): + self.controllers = self.get_controllers() + if self.controller not in self.controllers: + self.module.fail_json(msg="The provided controller name is invalid. Valid controllers: %s." 
+ % ", ".join(self.controllers.keys())) + + iface_before = self.fetch_target_interface() + update_required, body = self.make_update_body(iface_before) + if update_required and not self.check_mode: + try: + url = (self.url + + 'storage-systems/%s/symbol/setIscsiInterfaceProperties' % self.ssid) + (rc, result) = request(url, method='POST', data=json.dumps(body), headers=HEADERS, timeout=300, + ignore_errors=True, **self.creds) + # We could potentially retry this a few times, but it's probably a rare enough case (unless a playbook + # is cancelled mid-flight), that it isn't worth the complexity. + if rc == 422 and result['retcode'] in ['busy', '3']: + self.module.fail_json( + msg="The interface is currently busy (probably processing a previously requested modification" + " request). This operation cannot currently be completed. Array Id [%s]. Error [%s]." + % (self.ssid, result)) + # Handle authentication issues, etc. + elif rc != 200: + self.module.fail_json( + msg="Failed to modify the interface! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(result))) + self._logger.debug("Update request completed successfully.") + # This is going to catch cases like a connection failure + except Exception as err: + self.module.fail_json( + msg="Connection failure: we failed to modify the interface! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + iface_after = self.fetch_target_interface() + + self.module.exit_json(msg="The interface settings have been updated.", changed=update_required, + enabled=iface_after['ipv4Enabled']) + + def __call__(self, *args, **kwargs): + self.update() + + +def main(): + iface = IscsiInterface() + iface() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_target.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_target.py new file mode 100644 index 000000000..93b53b60c --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_iscsi_target.py @@ -0,0 +1,297 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = """ +--- +module: netapp_e_iscsi_target +short_description: NetApp E-Series manage iSCSI target configuration +description: + - Configure the settings of an E-Series iSCSI target +version_added: '2.7' +author: Michael Price (@lmprice) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.netapp.eseries +options: + name: + description: + - The name/alias to assign to the iSCSI target. + - This alias is often used by the initiator software in order to make an iSCSI target easier to identify. + type: str + aliases: + - alias + ping: + description: + - Enable ICMP ping responses from the configured iSCSI ports. + type: bool + default: yes + chap_secret: + description: + - Enable Challenge-Handshake Authentication Protocol (CHAP), utilizing this value as the password. + - When this value is specified, we will always trigger an update (changed=True). We have no way of verifying + whether or not the password has changed. + - The chap secret may only use ascii characters with values between 32 and 126 decimal. 
+ - The chap secret must be no less than 12 characters, but no greater than 57 characters in length. + - The chap secret is cleared when not specified or an empty string. + type: str + aliases: + - chap + - password + unnamed_discovery: + description: + - When an initiator initiates a discovery session to an initiator port, it is considered an unnamed + discovery session if the iSCSI target iqn is not specified in the request. + - This option may be disabled to increase security if desired. + type: bool + default: yes + log_path: + description: + - A local path (on the Ansible controller), to a file to be used for debug logging. + type: str + required: no +notes: + - Check mode is supported. + - Some of the settings are dependent on the settings applied to the iSCSI interfaces. These can be configured using + M(netapp_e_iscsi_interface). + - This module requires a Web Services API version of >= 1.3. +""" + +EXAMPLES = """ + - name: Enable ping responses and unnamed discovery sessions for all iSCSI ports + netapp_e_iscsi_target: + api_url: "https://localhost:8443/devmgr/v2" + api_username: admin + api_password: myPassword + ssid: "1" + validate_certs: no + name: myTarget + ping: yes + unnamed_discovery: yes + + - name: Set the target alias and the CHAP secret + netapp_e_iscsi_target: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + name: myTarget + chap: password1234 +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The iSCSI target settings have been updated. +alias: + description: + - The alias assigned to the iSCSI target. + returned: on success + sample: myArray + type: str +iqn: + description: + - The iqn (iSCSI Qualified Name), assigned to the iSCSI target. 
+ returned: on success + sample: iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45 + type: str +""" +import json +import logging +from pprint import pformat + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec +from ansible.module_utils._text import to_native + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +class IscsiTarget(object): + def __init__(self): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + name=dict(type='str', required=False, aliases=['alias']), + ping=dict(type='bool', required=False, default=True), + chap_secret=dict(type='str', required=False, aliases=['chap', 'password'], no_log=True), + unnamed_discovery=dict(type='bool', required=False, default=True), + log_path=dict(type='str', required=False), + )) + + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, ) + args = self.module.params + + self.name = args['name'] + self.ping = args['ping'] + self.chap_secret = args['chap_secret'] + self.unnamed_discovery = args['unnamed_discovery'] + + self.ssid = args['ssid'] + self.url = args['api_url'] + self.creds = dict(url_password=args['api_password'], + validate_certs=args['validate_certs'], + url_username=args['api_username'], ) + + self.check_mode = self.module.check_mode + self.post_body = dict() + self.controllers = list() + + log_path = args['log_path'] + + # logging setup + self._logger = logging.getLogger(self.__class__.__name__) + + if log_path: + logging.basicConfig( + level=logging.DEBUG, filename=log_path, filemode='w', + format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s') + + if not self.url.endswith('/'): + self.url += '/' + + if self.chap_secret: + if len(self.chap_secret) < 12 or len(self.chap_secret) > 57: + self.module.fail_json(msg="The provided CHAP secret is not valid, it must be between 12 and 57" + " characters in length.") + + for c in self.chap_secret: + ordinal = ord(c) + if ordinal < 32 or ordinal > 126: + self.module.fail_json(msg="The provided CHAP secret is not valid, it may only utilize ascii" + " characters with decimal values between 32 and 126.") + + @property + def target(self): + """Provide information on the iSCSI Target configuration + + Sample: + { + 'alias': 'myCustomName', + 'ping': True, + 'unnamed_discovery': True, + 'chap': False, + 'iqn': 'iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45', + } + """ + target = dict() + try: + (rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/storagePoolBundle/target' + % self.ssid, headers=HEADERS, **self.creds) + # This likely isn't an iSCSI-enabled system + if not data: + self.module.fail_json( + msg="This storage-system doesn't appear to have iSCSI interfaces. Array Id [%s]." 
% (self.ssid)) + + data = data[0] + + chap = any( + [auth for auth in data['configuredAuthMethods']['authMethodData'] if auth['authMethod'] == 'chap']) + + target.update(dict(alias=data['alias']['iscsiAlias'], + iqn=data['nodeName']['iscsiNodeName'], + chap=chap)) + + (rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/sa/iscsiEntityData' + % self.ssid, headers=HEADERS, **self.creds) + + data = data[0] + target.update(dict(ping=data['icmpPingResponseEnabled'], + unnamed_discovery=data['unnamedDiscoverySessionsEnabled'])) + + except Exception as err: + self.module.fail_json( + msg="Failed to retrieve the iSCSI target information. Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + return target + + def apply_iscsi_settings(self): + """Update the iSCSI target alias and CHAP settings""" + update = False + target = self.target + + body = dict() + + if self.name is not None and self.name != target['alias']: + update = True + body['alias'] = self.name + + # If the CHAP secret was provided, we trigger an update. + if self.chap_secret: + update = True + body.update(dict(enableChapAuthentication=True, + chapSecret=self.chap_secret)) + # If no secret was provided, then we disable chap + elif target['chap']: + update = True + body.update(dict(enableChapAuthentication=False)) + + if update and not self.check_mode: + try: + request(self.url + 'storage-systems/%s/iscsi/target-settings' % self.ssid, method='POST', + data=json.dumps(body), headers=HEADERS, **self.creds) + except Exception as err: + self.module.fail_json( + msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + return update + + def apply_target_changes(self): + update = False + target = self.target + + body = dict() + + if self.ping != target['ping']: + update = True + body['icmpPingResponseEnabled'] = self.ping + + if self.unnamed_discovery != target['unnamed_discovery']: + update = True + body['unnamedDiscoverySessionsEnabled'] = self.unnamed_discovery + + self._logger.info(pformat(body)) + if update and not self.check_mode: + try: + request(self.url + 'storage-systems/%s/iscsi/entity' % self.ssid, method='POST', + data=json.dumps(body), timeout=60, headers=HEADERS, **self.creds) + except Exception as err: + self.module.fail_json( + msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]." 
+                    % (self.ssid, to_native(err)))
+        return update
+
+    def update(self):
+        update = self.apply_iscsi_settings()
+        update = self.apply_target_changes() or update
+
+        target = self.target
+        data = dict((key, target[key]) for key in target if key in ['iqn', 'alias'])
+
+        self.module.exit_json(msg="The iSCSI target settings have been updated.", changed=update, **data)
+
+    def __call__(self, *args, **kwargs):
+        self.update()
+
+
+def main():
+    iface = IscsiTarget()
+    iface()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_ldap.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_ldap.py
new file mode 100644
index 000000000..e3bb61e60
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_ldap.py
@@ -0,0 +1,401 @@
+#!/usr/bin/python
+
+# (c) 2018, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_ldap
+short_description: NetApp E-Series manage LDAP integration to use for authentication
+description:
+    - Configure an E-Series system to allow authentication via an LDAP server
+version_added: '2.7'
+author: Michael Price (@lmprice)
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+    state:
+        description:
+            - Enable/disable LDAP support on the system. Disabling will clear out any existing defined domains.
+        choices:
+            - present
+            - absent
+        default: present
+        type: str
+    identifier:
+        description:
+            - This is a unique identifier for the configuration (for cases where there are multiple domains configured).
+            - If this is not specified, but I(state=present), we will utilize a default value of 'default'.
+        type: str
+    username:
+        description:
+            - This is the user account that will be used for querying the LDAP server.
+            - "Example: CN=MyBindAcct,OU=ServiceAccounts,DC=example,DC=com"
+        required: yes
+        type: str
+        aliases:
+            - bind_username
+    password:
+        description:
+            - This is the password for the bind user account.
+        required: yes
+        type: str
+        aliases:
+            - bind_password
+    attributes:
+        description:
+            - The user attributes that should be considered for the group to role mapping.
+            - Typically this is used with something like 'memberOf', and a user's access is tested against group
+              membership or lack thereof.
+        default: memberOf
+        type: list
+    server:
+        description:
+            - This is the LDAP server url.
+            - The connection string should be specified as using the ldap or ldaps protocol along with the port
+              information.
+        aliases:
+            - server_url
+        required: yes
+        type: str
+    name:
+        description:
+            - The domain name[s] used during authentication to identify which domain configuration to utilize.
+            - Defaults to the DNS name of I(server).
+            - The only requirement is that the name[s] be resolvable.
+            - "Example: user@example.com"
+        required: no
+        type: list
+    search_base:
+        description:
+            - The search base is used to find group memberships of the user.
+            - "Example: ou=users,dc=example,dc=com"
+        required: yes
+        type: str
+    role_mappings:
+        description:
+            - This is where you specify which groups should have access to what permissions for the
+              storage-system.
+            - For example, all users in group A will be assigned all 4 available roles, which will allow access
+              to all the management functionality of the system (super-user). Those in group B only have the
+              storage.monitor role, which will allow only read-only access.
+            - This is specified as a mapping of regular expressions to a list of roles. See the examples.
+            - The roles that will be assigned to the group/groups matching the provided regex.
+            - storage.admin allows users full read/write access to storage objects and operations.
+            - storage.monitor allows users read-only access to storage objects and operations.
+            - support.admin allows users access to hardware, diagnostic information, the Major Event
+              Log, and other critical support-related functionality, but not the storage configuration.
+            - security.admin allows users access to authentication/authorization configuration, as well
+              as the audit log configuration, and certification management.
+        type: dict
+        required: yes
+    user_attribute:
+        description:
+            - This is the attribute we will use to match the provided username when a user attempts to
+              authenticate.
+        type: str
+        default: sAMAccountName
+    log_path:
+        description:
+            - A local path to a file to be used for debug logging
+        required: no
+        type: str
+notes:
+    - Check mode is supported.
+    - This module allows you to define one or more LDAP domains identified uniquely by I(identifier) to use for
+      authentication. Authorization is determined by I(role_mappings), in that different groups of users may be given
+      different (or no) access to certain aspects of the system and API.
+    - The local user accounts will still be available if the LDAP server becomes unavailable/inaccessible.
+    - Generally, you'll need to get the details of your organization's LDAP server before you'll be able to configure
+      the system for using LDAP authentication; every implementation is likely to be very different.
+    - This API is currently only supported with the Embedded Web Services API v2.0 and higher, or the Web Services Proxy
+      v3.0 and higher.
+'''
+
+EXAMPLES = '''
+    - name: Disable LDAP authentication
+      netapp_e_ldap:
+        api_url: "10.1.1.1:8443"
+        api_username: "admin"
+        api_password: "myPass"
+        ssid: "1"
+        state: absent
+
+    - name: Remove the 'default' LDAP domain configuration
+      netapp_e_ldap:
+        state: absent
+        identifier: default
+
+    - name: Define a new LDAP domain, utilizing defaults where possible
+      netapp_e_ldap:
+        state: present
+        bind_username: "CN=MyBindAccount,OU=ServiceAccounts,DC=example,DC=com"
+        bind_password: "mySecretPass"
+        server: "ldap://example.com:389"
+        search_base: 'OU=Users,DC=example,DC=com'
+        role_mappings:
+          ".*dist-dev-storage.*":
+            - storage.admin
+            - security.admin
+            - support.admin
+            - storage.monitor
+'''
+
+RETURN = """
+msg:
+    description: Success message
+    returned: on success
+    type: str
+    sample: The ldap settings have been updated.
+"""
+
+import json
+import logging
+
+try:
+    import urlparse
+except ImportError:
+    import urllib.parse as urlparse
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+
+class Ldap(object):
+    NO_CHANGE_MSG = "No changes were necessary."
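+
+    # A sketch of the LDAP domain payload assembled by make_configuration() below;
+    # the values are illustrative only (borrowed from the EXAMPLES section above):
+    #     {"id": "default",
+    #      "ldapUrl": "ldap://example.com:389",
+    #      "bindLookupUser": {"user": "CN=MyBindAccount,OU=ServiceAccounts,DC=example,DC=com", "password": "..."},
+    #      "roleMapCollection": [{"groupRegex": ".*dist-dev-storage.*", "ignoreCase": True, "name": "storage.admin"}],
+    #      "groupAttributes": ["memberOf"],
+    #      "names": ["example.com"],
+    #      "searchBase": "OU=Users,DC=example,DC=com",
+    #      "userAttribute": "sAMAccountName"}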
+ + def __init__(self): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + state=dict(type='str', required=False, default='present', + choices=['present', 'absent']), + identifier=dict(type='str', required=False, ), + username=dict(type='str', required=False, aliases=['bind_username']), + password=dict(type='str', required=False, aliases=['bind_password'], no_log=True), + name=dict(type='list', required=False, ), + server=dict(type='str', required=False, aliases=['server_url']), + search_base=dict(type='str', required=False, ), + role_mappings=dict(type='dict', required=False, ), + user_attribute=dict(type='str', required=False, default='sAMAccountName'), + attributes=dict(type='list', default=['memberOf'], required=False, ), + log_path=dict(type='str', required=False), + )) + + required_if = [ + ["state", "present", ["username", "password", "server", "search_base", "role_mappings", ]] + ] + + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if) + args = self.module.params + self.ldap = args['state'] == 'present' + self.identifier = args['identifier'] + self.username = args['username'] + self.password = args['password'] + self.names = args['name'] + self.server = args['server'] + self.search_base = args['search_base'] + self.role_mappings = args['role_mappings'] + self.user_attribute = args['user_attribute'] + self.attributes = args['attributes'] + + self.ssid = args['ssid'] + self.url = args['api_url'] + self.creds = dict(url_password=args['api_password'], + validate_certs=args['validate_certs'], + url_username=args['api_username'], + timeout=60) + + self.check_mode = self.module.check_mode + + log_path = args['log_path'] + + # logging setup + self._logger = logging.getLogger(self.__class__.__name__) + + if log_path: + logging.basicConfig( + level=logging.DEBUG, filename=log_path, filemode='w', + format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s') + + if not self.url.endswith('/'): + self.url += '/' + + self.embedded = None + self.base_path = None + + def make_configuration(self): + if not self.identifier: + self.identifier = 'default' + + if not self.names: + parts = urlparse.urlparse(self.server) + netloc = parts.netloc + if ':' in netloc: + netloc = netloc.split(':')[0] + self.names = [netloc] + + roles = list() + for regex in self.role_mappings: + for role in self.role_mappings[regex]: + roles.append(dict(groupRegex=regex, + ignoreCase=True, + name=role)) + + domain = dict(id=self.identifier, + ldapUrl=self.server, + bindLookupUser=dict(user=self.username, password=self.password), + roleMapCollection=roles, + groupAttributes=self.attributes, + names=self.names, + searchBase=self.search_base, + userAttribute=self.user_attribute, + ) + + return domain + + def is_embedded(self): + """Determine whether or not we're using the embedded or proxy implementation of Web Services""" + if self.embedded is None: + url = self.url + try: + parts = urlparse.urlparse(url) + parts = parts._replace(path='/devmgr/utils/') + url = urlparse.urlunparse(parts) + + (rc, result) = request(url + 'about', **self.creds) + self.embedded = not result['runningAsProxy'] + except Exception as err: + self._logger.exception("Failed to retrieve the About information.") + self.module.fail_json(msg="Failed to determine the Web Services implementation type!" + " Array Id [%s]. Error [%s]." 
+ % (self.ssid, to_native(err))) + + return self.embedded + + def get_full_configuration(self): + try: + (rc, result) = request(self.url + self.base_path, **self.creds) + return result + except Exception as err: + self._logger.exception("Failed to retrieve the LDAP configuration.") + self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + def get_configuration(self, identifier): + try: + (rc, result) = request(self.url + self.base_path + '%s' % (identifier), ignore_errors=True, **self.creds) + if rc == 200: + return result + elif rc == 404: + return None + else: + self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]." + % (self.ssid, result)) + except Exception as err: + self._logger.exception("Failed to retrieve the LDAP configuration.") + self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + def update_configuration(self): + # Define a new domain based on the user input + domain = self.make_configuration() + + # This is the current list of configurations + current = self.get_configuration(self.identifier) + + update = current != domain + msg = "No changes were necessary for [%s]." % self.identifier + self._logger.info("Is updated: %s", update) + if update and not self.check_mode: + msg = "The configuration changes were made for [%s]." % self.identifier + try: + if current is None: + api = self.base_path + 'addDomain' + else: + api = self.base_path + '%s' % (domain['id']) + + (rc, result) = request(self.url + api, method='POST', data=json.dumps(domain), **self.creds) + except Exception as err: + self._logger.exception("Failed to modify the LDAP configuration.") + self.module.fail_json(msg="Failed to modify LDAP configuration! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + return msg, update + + def clear_single_configuration(self, identifier=None): + if identifier is None: + identifier = self.identifier + + configuration = self.get_configuration(identifier) + updated = False + msg = self.NO_CHANGE_MSG + if configuration: + updated = True + msg = "The LDAP domain configuration for [%s] was cleared." % identifier + if not self.check_mode: + try: + (rc, result) = request(self.url + self.base_path + '%s' % identifier, method='DELETE', **self.creds) + except Exception as err: + self.module.fail_json(msg="Failed to remove LDAP configuration! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + return msg, updated + + def clear_configuration(self): + configuration = self.get_full_configuration() + updated = False + msg = self.NO_CHANGE_MSG + if configuration['ldapDomains']: + updated = True + msg = "The LDAP configuration for all domains was cleared." + if not self.check_mode: + try: + (rc, result) = request(self.url + self.base_path, method='DELETE', ignore_errors=True, **self.creds) + + # Older versions of NetApp E-Series restAPI does not possess an API to remove all existing configs + if rc == 405: + for config in configuration['ldapDomains']: + self.clear_single_configuration(config['id']) + + except Exception as err: + self.module.fail_json(msg="Failed to clear LDAP configuration! Array Id [%s]. Error [%s]." 
+                                              % (self.ssid, to_native(err)))
+        return msg, updated
+
+    def get_base_path(self):
+        embedded = self.is_embedded()
+        if embedded:
+            return 'storage-systems/%s/ldap/' % self.ssid
+        else:
+            return '/ldap/'
+
+    def update(self):
+        self.base_path = self.get_base_path()
+
+        if self.ldap:
+            msg, update = self.update_configuration()
+        elif self.identifier:
+            msg, update = self.clear_single_configuration()
+        else:
+            msg, update = self.clear_configuration()
+        self.module.exit_json(msg=msg, changed=update, )
+
+    def __call__(self, *args, **kwargs):
+        self.update()
+
+
+def main():
+    settings = Ldap()
+    settings()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_lun_mapping.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_lun_mapping.py
new file mode 100644
index 000000000..1b190ad32
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_lun_mapping.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = '''
+---
+module: netapp_e_lun_mapping
+author:
+    - Kevin Hulquest (@hulquest)
+    - Nathan Swartz (@ndswartz)
+short_description: NetApp E-Series create, delete, or modify lun mappings
+description:
+    - Create, delete, or modify mappings between a volume and a targeted host or host group.
+version_added: "2.2"
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+    state:
+        description:
+            - Present will ensure the mapping exists, absent will remove the mapping.
+        required: True
+        type: str
+        choices: ["present", "absent"]
+    target:
+        description:
+            - The name of the host or host group you wish to assign to the mapping.
+            - If omitted, the default hostgroup is used.
+            - If the supplied I(volume_name) is associated with a different target, it will be updated to what is supplied here.
+        type: str
+        required: False
+    volume_name:
+        description:
+            - The name of the volume you wish to include in the mapping.
+        required: True
+        type: str
+        aliases:
+            - volume
+    lun:
+        description:
+            - The LUN value you wish to give the mapping.
+            - If the supplied I(volume_name) is associated with a different LUN, it will be updated to what is supplied here.
+            - The LUN value will be determined by the storage-system when not specified.
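+            - For example, I(lun=5) requests that the volume be presented at LUN 5 on the target; when omitted,
+              the storage-system chooses the value itself.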
+        version_added: 2.7
+        type: int
+        required: no
+    target_type:
+        description:
+            - This option specifies whether the target should be a host or a group of hosts.
+            - Only necessary when the target name is used for both a host and a group of hosts.
+        choices:
+            - host
+            - group
+        version_added: 2.7
+        type: str
+        required: no
+'''
+
+EXAMPLES = '''
+---
+    - name: Map volume1 to the host target host1
+      netapp_e_lun_mapping:
+        ssid: 1
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: no
+        state: present
+        target: host1
+        volume: volume1
+    - name: Delete the lun mapping between volume1 and host1
+      netapp_e_lun_mapping:
+        ssid: 1
+        api_url: "{{ netapp_api_url }}"
+        api_username: "{{ netapp_api_username }}"
+        api_password: "{{ netapp_api_password }}"
+        validate_certs: yes
+        state: absent
+        target: host1
+        volume: volume1
+'''
+RETURN = '''
+msg:
+    description: success of the module
+    returned: always
+    type: str
+    sample: Lun mapping is complete
+'''
+import json
+import logging
+from pprint import pformat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec
+from ansible.module_utils._text import to_native
+
+HEADERS = {
+    "Content-Type": "application/json",
+    "Accept": "application/json"
+}
+
+
+class LunMapping(object):
+    def __init__(self):
+        argument_spec = eseries_host_argument_spec()
+        argument_spec.update(dict(
+            state=dict(required=True, choices=["present", "absent"]),
+            target=dict(required=False, default=None),
+            volume_name=dict(required=True, aliases=["volume"]),
+            lun=dict(type="int", required=False),
+            target_type=dict(required=False, choices=["host", "group"])))
+        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+        args = self.module.params
+
+        self.state = args["state"] in ["present"]
+        self.target = args["target"]
+        self.volume = args["volume_name"]
+        self.lun = args["lun"]
+        self.target_type = args["target_type"]
+        self.ssid = args["ssid"]
+        self.url = args["api_url"]
+        self.check_mode = self.module.check_mode
+        self.creds = dict(url_username=args["api_username"],
+                          url_password=args["api_password"],
+                          validate_certs=args["validate_certs"])
+        self.mapping_info = None
+
+        if not self.url.endswith('/'):
+            self.url += '/'
+
+    def update_mapping_info(self):
+        """Collect the current state of the storage array."""
+        response = None
+        try:
+            rc, response = request(self.url + "storage-systems/%s/graph" % self.ssid,
+                                   method="GET", headers=HEADERS, **self.creds)
+
+        except Exception as error:
+            self.module.fail_json(
+                msg="Failed to retrieve storage array graph. Id [%s]. 
Error [%s]" % (self.ssid, to_native(error))) + + # Create dictionary containing host/cluster references mapped to their names + target_reference = {} + target_name = {} + target_type = {} + + if self.target_type is None or self.target_type == "host": + for host in response["storagePoolBundle"]["host"]: + target_reference.update({host["hostRef"]: host["name"]}) + target_name.update({host["name"]: host["hostRef"]}) + target_type.update({host["name"]: "host"}) + + if self.target_type is None or self.target_type == "group": + for cluster in response["storagePoolBundle"]["cluster"]: + + # Verify there is no ambiguity between target's type (ie host and group has the same name) + if self.target and self.target_type is None and cluster["name"] == self.target and \ + self.target in target_name.keys(): + self.module.fail_json(msg="Ambiguous target type: target name is used for both host and group" + " targets! Id [%s]" % self.ssid) + + target_reference.update({cluster["clusterRef"]: cluster["name"]}) + target_name.update({cluster["name"]: cluster["clusterRef"]}) + target_type.update({cluster["name"]: "group"}) + + volume_reference = {} + volume_name = {} + lun_name = {} + for volume in response["volume"]: + volume_reference.update({volume["volumeRef"]: volume["name"]}) + volume_name.update({volume["name"]: volume["volumeRef"]}) + if volume["listOfMappings"]: + lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]}) + for volume in response["highLevelVolBundle"]["thinVolume"]: + volume_reference.update({volume["volumeRef"]: volume["name"]}) + volume_name.update({volume["name"]: volume["volumeRef"]}) + if volume["listOfMappings"]: + lun_name.update({volume["name"]: volume["listOfMappings"][0]["lun"]}) + + # Build current mapping object + self.mapping_info = dict(lun_mapping=[dict(volume_reference=mapping["volumeRef"], + map_reference=mapping["mapRef"], + lun_mapping_reference=mapping["lunMappingRef"], + lun=mapping["lun"] + ) for mapping in response["storagePoolBundle"]["lunMapping"]], + volume_by_reference=volume_reference, + volume_by_name=volume_name, + lun_by_name=lun_name, + target_by_reference=target_reference, + target_by_name=target_name, + target_type_by_name=target_type) + + def get_lun_mapping(self): + """Find the matching lun mapping reference. + + Returns: tuple(bool, int, int): contains volume match, volume mapping reference and mapping lun + """ + target_match = False + reference = None + lun = None + + self.update_mapping_info() + + # Verify that when a lun is specified that it does not match an existing lun value unless it is associated with + # the specified volume (ie for an update) + if self.lun and any((self.lun == lun_mapping["lun"] and + self.target == self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] and + self.volume != self.mapping_info["volume_by_reference"][lun_mapping["volume_reference"]] + ) for lun_mapping in self.mapping_info["lun_mapping"]): + self.module.fail_json(msg="Option lun value is already in use for target! Array Id [%s]." % self.ssid) + + # Verify that when target_type is specified then it matches the target's actually type + if self.target and self.target_type and self.target in self.mapping_info["target_type_by_name"].keys() and \ + self.mapping_info["target_type_by_name"][self.target] != self.target_type: + self.module.fail_json( + msg="Option target does not match the specified target_type! Id [%s]." % self.ssid) + + # Verify volume and target exist if needed for expected state. 
+        if self.state:
+            if self.volume not in self.mapping_info["volume_by_name"].keys():
+                self.module.fail_json(msg="Volume does not exist. Id [%s]." % self.ssid)
+            if self.target and self.target not in self.mapping_info["target_by_name"].keys():
+                self.module.fail_json(msg="Target does not exist. Id [%s]." % self.ssid)
+
+        for lun_mapping in self.mapping_info["lun_mapping"]:
+
+            # Find matching volume reference
+            if lun_mapping["volume_reference"] == self.mapping_info["volume_by_name"][self.volume]:
+                reference = lun_mapping["lun_mapping_reference"]
+                lun = lun_mapping["lun"]
+
+                # Determine whether the lun mapping is attached to the specified target (and, when provided, the lun value)
+                if (lun_mapping["map_reference"] in self.mapping_info["target_by_reference"].keys() and
+                        self.mapping_info["target_by_reference"][lun_mapping["map_reference"]] == self.target and
+                        (self.lun is None or lun == self.lun)):
+                    target_match = True
+
+        return target_match, reference, lun
+
+    def update(self):
+        """Execute any required changes on the storage array."""
+        target_match, lun_reference, lun = self.get_lun_mapping()
+        update = (self.state and not target_match) or (not self.state and target_match)
+
+        if update and not self.check_mode:
+            try:
+                if self.state:
+                    body = dict()
+                    target = None if not self.target else self.mapping_info["target_by_name"][self.target]
+                    if target:
+                        body.update(dict(targetId=target))
+                    if self.lun is not None:
+                        body.update(dict(lun=self.lun))
+
+                    if lun_reference:
+                        rc, response = request(self.url + "storage-systems/%s/volume-mappings/%s/move"
+                                               % (self.ssid, lun_reference), method="POST", data=json.dumps(body),
+                                               headers=HEADERS, **self.creds)
+                    else:
+                        body.update(dict(mappableObjectId=self.mapping_info["volume_by_name"][self.volume]))
+                        rc, response = request(self.url + "storage-systems/%s/volume-mappings" % self.ssid,
+                                               method="POST", data=json.dumps(body), headers=HEADERS, **self.creds)
+
+                else:   # Remove existing lun mapping for volume and target
+                    rc, response = request(self.url + "storage-systems/%s/volume-mappings/%s"
+                                           % (self.ssid, lun_reference),
+                                           method="DELETE", headers=HEADERS, **self.creds)
+            except Exception as error:
+                self.module.fail_json(
+                    msg="Failed to update storage array lun mapping. Id [%s]. 
Error [%s]" + % (self.ssid, to_native(error))) + + self.module.exit_json(msg="Lun mapping is complete.", changed=update) + + +def main(): + lun_mapping = LunMapping() + lun_mapping.update() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_mgmt_interface.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_mgmt_interface.py new file mode 100644 index 000000000..8a5e4f8e5 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_mgmt_interface.py @@ -0,0 +1,723 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = """ +--- +module: netapp_e_mgmt_interface +short_description: NetApp E-Series management interface configuration +description: + - Configure the E-Series management interfaces +version_added: '2.7' +author: + - Michael Price (@lmprice) + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.netapp.eseries +options: + state: + description: + - Enable or disable IPv4 network interface configuration. + - Either IPv4 or IPv6 must be enabled otherwise error will occur. + - Only required when enabling or disabling IPv4 network interface + choices: + - enable + - disable + required: no + type: str + aliases: + - enable_interface + controller: + description: + - The controller that owns the port you want to configure. + - Controller names are represented alphabetically, with the first controller as A, + the second as B, and so on. + - Current hardware models have either 1 or 2 available controllers, but that is not a guaranteed hard + limitation and could change in the future. + required: yes + type: str + choices: + - A + - B + name: + description: + - The port to modify the configuration for. + - The list of choices is not necessarily comprehensive. It depends on the number of ports + that are present in the system. + - The name represents the port number (typically from left to right on the controller), + beginning with a value of 1. + - Mutually exclusive with I(channel). + type: str + aliases: + - port + - iface + channel: + description: + - The port to modify the configuration for. + - The channel represents the port number (typically from left to right on the controller), + beginning with a value of 1. + - Mutually exclusive with I(name). + type: int + address: + description: + - The IPv4 address to assign to the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + type: str + required: no + subnet_mask: + description: + - The subnet mask to utilize for the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + type: str + required: no + gateway: + description: + - The IPv4 gateway address to utilize for the interface. + - Should be specified in xx.xx.xx.xx form. + - Mutually exclusive with I(config_method=dhcp) + type: str + required: no + config_method: + description: + - The configuration method type to use for network interface ports. + - dhcp is mutually exclusive with I(address), I(subnet_mask), and I(gateway). 
+        choices:
+            - dhcp
+            - static
+        type: str
+        required: no
+    dns_config_method:
+        description:
+            - The configuration method type to use for DNS services.
+            - dhcp is mutually exclusive with I(dns_address) and I(dns_address_backup).
+        choices:
+            - dhcp
+            - static
+        type: str
+        required: no
+    dns_address:
+        description:
+            - Primary IPv4 DNS server address
+        type: str
+        required: no
+    dns_address_backup:
+        description:
+            - Backup IPv4 DNS server address
+            - Queried when primary DNS server fails
+        type: str
+        required: no
+    ntp_config_method:
+        description:
+            - The configuration method type to use for NTP services.
+            - disable is mutually exclusive with I(ntp_address) and I(ntp_address_backup).
+            - dhcp is mutually exclusive with I(ntp_address) and I(ntp_address_backup).
+        choices:
+            - disable
+            - dhcp
+            - static
+        type: str
+        required: no
+    ntp_address:
+        description:
+            - Primary IPv4 NTP server address
+        type: str
+        required: no
+    ntp_address_backup:
+        description:
+            - Backup IPv4 NTP server address
+            - Queried when primary NTP server fails
+        required: no
+        type: str
+    ssh:
+        type: bool
+        description:
+            - Enable ssh access to the controller for debug purposes.
+            - This is a controller-level setting.
+            - rlogin/telnet will be enabled for ancient equipment where ssh is not available.
+        required: no
+    log_path:
+        description:
+            - A local path to a file to be used for debug logging
+        type: str
+        required: no
+notes:
+    - Check mode is supported.
+    - The interface settings are applied synchronously, but changes to the interface itself (receiving a new IP address
+      via dhcp, etc.) can take seconds or minutes longer to take effect.
+    - "Known issue: Changes specifically to down ports will result in a failure. However, this may not be the case in
+      upcoming NetApp E-Series firmware releases (released after firmware version 11.40.2)."
+""" + +EXAMPLES = """ + - name: Configure the first port on the A controller with a static IPv4 address + netapp_e_mgmt_interface: + channel: 1 + controller: "A" + config_method: static + address: "192.168.1.100" + subnet_mask: "255.255.255.0" + gateway: "192.168.1.1" + ssid: "1" + api_url: "10.1.1.1:8443" + api_username: "admin" + api_password: "myPass" + + - name: Disable ipv4 connectivity for the second port on the B controller + netapp_e_mgmt_interface: + channel: 2 + controller: "B" + enable_interface: no + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + + - name: Enable ssh access for ports one and two on controller A + netapp_e_mgmt_interface: + channel: "{{ item }}" + controller: "A" + ssh: yes + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + loop: + - 1 + - 2 + + - name: Configure static DNS settings for the first port on controller A + netapp_e_mgmt_interface: + channel: 1 + controller: "A" + dns_config_method: static + dns_address: "192.168.1.100" + dns_address_backup: "192.168.1.1" + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + + - name: Configure static NTP settings for ports one and two on controller B + netapp_e_mgmt_interface: + channel: "{{ item }}" + controller: "B" + ntp_config_method: static + ntp_address: "129.100.1.100" + ntp_address_backup: "127.100.1.1" + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + loop: + - 1 + - 2 +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The interface settings have been updated. +enabled: + description: + - Indicates whether IPv4 connectivity has been enabled or disabled. + - This does not necessarily indicate connectivity. If dhcp was enabled absent a dhcp server, for instance, + it is unlikely that the configuration will actually be valid. 
+ returned: on success + sample: True + type: bool +""" +import json +import logging +from pprint import pformat, pprint +import time +import socket + +try: + import urlparse +except ImportError: + import urllib.parse as urlparse + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec +from ansible.module_utils._text import to_native + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +class MgmtInterface(object): + MAX_RETRIES = 15 + + def __init__(self): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + state=dict(type="str", choices=["enable", "disable"], + aliases=["enable_interface"], required=False), + controller=dict(type="str", required=True, choices=["A", "B"]), + name=dict(type="str", aliases=["port", "iface"]), + channel=dict(type="int"), + address=dict(type="str", required=False), + subnet_mask=dict(type="str", required=False), + gateway=dict(type="str", required=False), + config_method=dict(type="str", required=False, choices=["dhcp", "static"]), + dns_config_method=dict(type="str", required=False, choices=["dhcp", "static"]), + dns_address=dict(type="str", required=False), + dns_address_backup=dict(type="str", required=False), + ntp_config_method=dict(type="str", required=False, choices=["disable", "dhcp", "static"]), + ntp_address=dict(type="str", required=False), + ntp_address_backup=dict(type="str", required=False), + ssh=dict(type="bool", required=False), + log_path=dict(type="str", required=False), + )) + + required_if = [ + ["state", "enable", ["config_method"]], + ["config_method", "static", ["address", "subnet_mask"]], + ["dns_config_method", "static", ["dns_address"]], + ["ntp_config_method", "static", ["ntp_address"]], + ] + + mutually_exclusive = [ + ["name", "channel"], + ] + + self.module = AnsibleModule(argument_spec=argument_spec, + supports_check_mode=True, + required_if=required_if, + mutually_exclusive=mutually_exclusive) + args = self.module.params + + self.controller = args["controller"] + self.name = args["name"] + self.channel = args["channel"] + + self.config_method = args["config_method"] + self.address = args["address"] + self.subnet_mask = args["subnet_mask"] + self.gateway = args["gateway"] + self.enable_interface = None if args["state"] is None else args["state"] == "enable" + + self.dns_config_method = args["dns_config_method"] + self.dns_address = args["dns_address"] + self.dns_address_backup = args["dns_address_backup"] + + self.ntp_config_method = args["ntp_config_method"] + self.ntp_address = args["ntp_address"] + self.ntp_address_backup = args["ntp_address_backup"] + + self.ssh = args["ssh"] + + self.ssid = args["ssid"] + self.url = args["api_url"] + self.creds = dict(url_password=args["api_password"], + validate_certs=args["validate_certs"], + url_username=args["api_username"], ) + + self.retries = 0 + + self.check_mode = self.module.check_mode + self.post_body = dict() + + log_path = args["log_path"] + + # logging setup + self._logger = logging.getLogger(self.__class__.__name__) + + if log_path: + logging.basicConfig( + level=logging.DEBUG, filename=log_path, filemode='w', + format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s') + + if not self.url.endswith('/'): + self.url += '/' + + @property + def controllers(self): + """Retrieve a mapping of controller labels to their references + { + 'A': 
'070000000000000000000001', + 'B': '070000000000000000000002', + } + :return: the controllers defined on the system + """ + try: + (rc, controllers) = request(self.url + 'storage-systems/%s/controllers' + % self.ssid, headers=HEADERS, **self.creds) + except Exception as err: + controllers = list() + self.module.fail_json( + msg="Failed to retrieve the controller settings. Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + controllers.sort(key=lambda c: c['physicalLocation']['slot']) + + controllers_dict = dict() + i = ord('A') + for controller in controllers: + label = chr(i) + settings = dict(controllerSlot=controller['physicalLocation']['slot'], + controllerRef=controller['controllerRef'], + ssh=controller['networkSettings']['remoteAccessEnabled']) + controllers_dict[label] = settings + i += 1 + + return controllers_dict + + @property + def interface(self): + net_interfaces = list() + try: + (rc, net_interfaces) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces' + % self.ssid, headers=HEADERS, **self.creds) + except Exception as err: + self.module.fail_json( + msg="Failed to retrieve defined management interfaces. Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + controllers = self.controllers + controller = controllers[self.controller] + + net_interfaces = [iface for iface in net_interfaces if iface["controllerRef"] == controller["controllerRef"]] + + # Find the correct interface + iface = None + for net in net_interfaces: + if self.name: + if net["alias"] == self.name or net["interfaceName"] == self.name: + iface = net + break + elif self.channel: + if net["channel"] == self.channel: + iface = net + break + + if iface is None: + identifier = self.name if self.name is not None else self.channel + self.module.fail_json(msg="We could not find an interface matching [%s] on Array=[%s]." 
+ % (identifier, self.ssid)) + + return dict(alias=iface["alias"], + channel=iface["channel"], + link_status=iface["linkStatus"], + enabled=iface["ipv4Enabled"], + address=iface["ipv4Address"], + gateway=iface["ipv4GatewayAddress"], + subnet_mask=iface["ipv4SubnetMask"], + dns_config_method=iface["dnsProperties"]["acquisitionProperties"]["dnsAcquisitionType"], + dns_servers=iface["dnsProperties"]["acquisitionProperties"]["dnsServers"], + ntp_config_method=iface["ntpProperties"]["acquisitionProperties"]["ntpAcquisitionType"], + ntp_servers=iface["ntpProperties"]["acquisitionProperties"]["ntpServers"], + config_method=iface["ipv4AddressConfigMethod"], + controllerRef=iface["controllerRef"], + controllerSlot=iface["controllerSlot"], + ipv6Enabled=iface["ipv6Enabled"], + id=iface["interfaceRef"], ) + + def get_enable_interface_settings(self, iface, expected_iface, update, body): + """Enable or disable the IPv4 network interface.""" + if self.enable_interface: + if not iface["enabled"]: + update = True + body["ipv4Enabled"] = True + else: + if iface["enabled"]: + update = True + body["ipv4Enabled"] = False + + expected_iface["enabled"] = body["ipv4Enabled"] + return update, expected_iface, body + + def get_interface_settings(self, iface, expected_iface, update, body): + """Update network interface settings.""" + + if self.config_method == "dhcp": + if iface["config_method"] != "configDhcp": + update = True + body["ipv4AddressConfigMethod"] = "configDhcp" + + else: + if iface["config_method"] != "configStatic": + update = True + body["ipv4AddressConfigMethod"] = "configStatic" + + if iface["address"] != self.address: + update = True + body["ipv4Address"] = self.address + + if iface["subnet_mask"] != self.subnet_mask: + update = True + body["ipv4SubnetMask"] = self.subnet_mask + + if self.gateway and iface["gateway"] != self.gateway: + update = True + body["ipv4GatewayAddress"] = self.gateway + + expected_iface["address"] = body["ipv4Address"] + expected_iface["subnet_mask"] = body["ipv4SubnetMask"] + expected_iface["gateway"] = body["ipv4GatewayAddress"] + + expected_iface["config_method"] = body["ipv4AddressConfigMethod"] + + return update, expected_iface, body + + def get_dns_server_settings(self, iface, expected_iface, update, body): + """Add DNS server information to the request body.""" + if self.dns_config_method == "dhcp": + if iface["dns_config_method"] != "dhcp": + update = True + body["dnsAcquisitionDescriptor"] = dict(dnsAcquisitionType="dhcp") + + elif self.dns_config_method == "static": + dns_servers = [dict(addressType="ipv4", ipv4Address=self.dns_address)] + if self.dns_address_backup: + dns_servers.append(dict(addressType="ipv4", ipv4Address=self.dns_address_backup)) + + body["dnsAcquisitionDescriptor"] = dict(dnsAcquisitionType="stat", dnsServers=dns_servers) + + if (iface["dns_config_method"] != "stat" or + len(iface["dns_servers"]) != len(dns_servers) or + (len(iface["dns_servers"]) == 2 and + (iface["dns_servers"][0]["ipv4Address"] != self.dns_address or + iface["dns_servers"][1]["ipv4Address"] != self.dns_address_backup)) or + (len(iface["dns_servers"]) == 1 and + iface["dns_servers"][0]["ipv4Address"] != self.dns_address)): + update = True + + expected_iface["dns_servers"] = dns_servers + + expected_iface["dns_config_method"] = body["dnsAcquisitionDescriptor"]["dnsAcquisitionType"] + return update, expected_iface, body + + def get_ntp_server_settings(self, iface, expected_iface, update, body): + """Add NTP server information to the request body.""" + if 
self.ntp_config_method == "disable": + if iface["ntp_config_method"] != "disabled": + update = True + body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="disabled") + + elif self.ntp_config_method == "dhcp": + if iface["ntp_config_method"] != "dhcp": + update = True + body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="dhcp") + + elif self.ntp_config_method == "static": + ntp_servers = [dict(addrType="ipvx", ipvxAddress=dict(addressType="ipv4", ipv4Address=self.ntp_address))] + if self.ntp_address_backup: + ntp_servers.append(dict(addrType="ipvx", + ipvxAddress=dict(addressType="ipv4", ipv4Address=self.ntp_address_backup))) + + body["ntpAcquisitionDescriptor"] = dict(ntpAcquisitionType="stat", ntpServers=ntp_servers) + + if (iface["ntp_config_method"] != "stat" or + len(iface["ntp_servers"]) != len(ntp_servers) or + ((len(iface["ntp_servers"]) == 2 and + (iface["ntp_servers"][0]["ipvxAddress"]["ipv4Address"] != self.ntp_address or + iface["ntp_servers"][1]["ipvxAddress"]["ipv4Address"] != self.ntp_address_backup)) or + (len(iface["ntp_servers"]) == 1 and + iface["ntp_servers"][0]["ipvxAddress"]["ipv4Address"] != self.ntp_address))): + update = True + + expected_iface["ntp_servers"] = ntp_servers + + expected_iface["ntp_config_method"] = body["ntpAcquisitionDescriptor"]["ntpAcquisitionType"] + return update, expected_iface, body + + def get_remote_ssh_settings(self, settings, update, body): + """Configure network interface ports for remote ssh access.""" + if self.ssh != settings["ssh"]: + update = True + + body["enableRemoteAccess"] = self.ssh + return update, body + + def update_array(self, settings, iface): + """Update controller with new interface, dns service, ntp service and/or remote ssh access information. + + :returns: whether information passed will modify the controller's current state + :rtype: bool + """ + update = False + body = dict(controllerRef=settings['controllerRef'], + interfaceRef=iface['id']) + expected_iface = iface.copy() + + # Check if api url is using the effected management interface to change itself + update_used_matching_address = False + if self.enable_interface and self.config_method: + netloc = list(urlparse.urlparse(self.url))[1] + address = netloc.split(":")[0] + address_info = socket.getaddrinfo(address, 8443) + url_address_info = socket.getaddrinfo(iface["address"], 8443) + update_used_matching_address = any(info in url_address_info for info in address_info) + + self._logger.info("update_used_matching_address: %s", update_used_matching_address) + + # Populate the body of the request and check for changes + if self.enable_interface is not None: + update, expected_iface, body = self.get_enable_interface_settings(iface, expected_iface, update, body) + + if self.config_method is not None: + update, expected_iface, body = self.get_interface_settings(iface, expected_iface, update, body) + + if self.dns_config_method is not None: + update, expected_iface, body = self.get_dns_server_settings(iface, expected_iface, update, body) + + if self.ntp_config_method is not None: + update, expected_iface, body = self.get_ntp_server_settings(iface, expected_iface, update, body) + + if self.ssh is not None: + update, body = self.get_remote_ssh_settings(settings, update, body) + iface["ssh"] = self.ssh + expected_iface["ssh"] = self.ssh + + # debug information + self._logger.info(pformat(body)) + self._logger.info(pformat(iface)) + self._logger.info(pformat(expected_iface)) + + if self.check_mode: + return update + + if update and not self.check_mode: + if 
not update_used_matching_address: + try: + (rc, data) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces' + % self.ssid, method='POST', data=json.dumps(body), headers=HEADERS, + timeout=300, ignore_errors=True, **self.creds) + if rc == 422: + if data['retcode'] == "4" or data['retcode'] == "illegalParam": + if not (body['ipv4Enabled'] or iface['ipv6Enabled']): + self.module.fail_json(msg="This storage-system already has IPv6 connectivity disabled. " + "DHCP configuration for IPv4 is required at a minimum." + " Array Id [%s] Message [%s]." + % (self.ssid, data['errorMessage'])) + else: + self.module.fail_json(msg="We failed to configure the management interface. Array Id " + "[%s] Message [%s]." % (self.ssid, data)) + elif rc >= 300: + self.module.fail_json( + msg="We failed to configure the management interface. Array Id [%s] Message [%s]." % + (self.ssid, data)) + + # This is going to catch cases like a connection failure + except Exception as err: + self.module.fail_json( + msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + else: + self.update_api_address_interface_match(body) + + return self.validate_changes(expected_iface) if update and iface["link_status"] != "up" else update + + def update_api_address_interface_match(self, body): + """Change network interface address which matches the api_address""" + try: + try: + (rc, data) = request(self.url + 'storage-systems/%s/configuration/ethernet-interfaces' % self.ssid, + use_proxy=False, force=True, ignore_errors=True, method='POST', + data=json.dumps(body), headers=HEADERS, timeout=10, **self.creds) + except Exception: + url_parts = list(urlparse.urlparse(self.url)) + domain = url_parts[1].split(":") + domain[0] = self.address + url_parts[1] = ":".join(domain) + expected_url = urlparse.urlunparse(url_parts) + self._logger.info(pformat(expected_url)) + + (rc, data) = request(expected_url + 'storage-systems/%s/configuration/ethernet-interfaces' % self.ssid, + headers=HEADERS, timeout=300, **self.creds) + return + except Exception as err: + self._logger.info(type(err)) + self.module.fail_json( + msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + def validate_changes(self, expected_iface, retry=6): + """Validate interface changes were applied to the controller interface port. 30 second timeout""" + if self.interface != expected_iface: + time.sleep(5) + if retry: + return self.validate_changes(expected_iface, retry - 1) + + self.module.fail_json(msg="Update failure: we failed to verify the necessary state change.") + + return True + + def check_health(self): + """It's possible, due to a previous operation, for the API to report a 424 (offline) status for the + storage-system. Therefore, we run a manual check with retries to attempt to contact the system before we + continue. + """ + try: + (rc, data) = request(self.url + 'storage-systems/%s/controllers' + % self.ssid, headers=HEADERS, + ignore_errors=True, **self.creds) + + # We've probably recently changed the interface settings and it's still coming back up: retry. + if rc == 424: + if self.retries < self.MAX_RETRIES: + self.retries += 1 + self._logger.info("We hit a 424, retrying in 5s.") + time.sleep(5) + self.check_health() + else: + self.module.fail_json( + msg="We failed to pull storage-system information. Array Id [%s] Message [%s]." 
% + (self.ssid, data)) + elif rc >= 300: + self.module.fail_json( + msg="We failed to pull storage-system information. Array Id [%s] Message [%s]." % + (self.ssid, data)) + # This is going to catch cases like a connection failure + except Exception as err: + if self.retries < self.MAX_RETRIES: + self._logger.info("We hit a connection failure, retrying in 5s.") + self.retries += 1 + time.sleep(5) + self.check_health() + else: + self.module.fail_json( + msg="Connection failure: we failed to modify the network settings! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + def update(self): + """Update storage system with necessary changes.""" + # Check if the storage array can be contacted + self.check_health() + + # make the necessary changes to the storage system + settings = self.controllers[self.controller] + iface = self.interface + self._logger.info(pformat(settings)) + self._logger.info(pformat(iface)) + update = self.update_array(settings, iface) + + self.module.exit_json(msg="The interface settings have been updated.", changed=update) + + def __call__(self, *args, **kwargs): + self.update() + + +def main(): + iface = MgmtInterface() + iface() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_group.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_group.py new file mode 100644 index 000000000..8bcee43fc --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_group.py @@ -0,0 +1,376 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = """ +--- +module: netapp_e_snapshot_group +short_description: NetApp E-Series manage snapshot groups +description: + - Create, update, delete snapshot groups for NetApp E-series storage arrays +version_added: '2.2' +author: Kevin Hulquest (@hulquest) +options: + ssid: + description: + - Storage system identifier + type: str + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + type: str + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + type: str + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + type: str + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + type: bool + state: + description: + - Whether to ensure the group is present or absent. + required: True + type: str + choices: + - present + - absent + name: + description: + - The name to give the snapshot group + type: str + required: True + base_volume_name: + description: + - The name of the base volume or thin volume to use as the base for the new snapshot group. + - If a snapshot group with an identical C(name) already exists but with a different base volume + an error will be returned. 
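+ - The base volume is matched by exact name, so the name must be unique among the array's volumes; the module fails when more than one match is found.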
+ type: str + required: True + repo_pct: + description: + - The size of the repository in relation to the size of the base volume + required: False + type: int + default: 20 + warning_threshold: + description: + - The repository utilization warning threshold, as a percentage of the repository volume capacity. + required: False + type: int + default: 80 + delete_limit: + description: + - The automatic deletion indicator. + - If non-zero, the oldest snapshot image will be automatically deleted when creating a new snapshot image to keep the total number of + snapshot images limited to the number specified. + - This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group. + required: False + type: int + default: 30 + full_policy: + description: + - The behavior when the data repository becomes full. + - This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group. + required: False + default: purgepit + type: str + choices: ['unknown', 'failbasewrites', 'purgepit'] + storage_pool_name: + required: True + description: + - The name of the storage pool on which to allocate the repository volume. + type: str + rollback_priority: + required: False + description: + - The importance of the rollback operation. + - This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group. + choices: ['highest', 'high', 'medium', 'low', 'lowest'] + type: str + default: medium +""" + +EXAMPLES = """ + - name: Configure Snapshot group + netapp_e_snapshot_group: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + base_volume_name: SSGroup_test + name: OOSS_Group + repo_pct: 20 + warning_threshold: 85 + delete_limit: 30 + full_policy: purgepit + storage_pool_name: Disk_Pool_1 + rollback_priority: medium
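+ # Illustrative example (not from the original module documentation): removing the + # same group. Options marked required above must still be supplied with state=absent. + - name: Remove Snapshot group + netapp_e_snapshot_group: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + base_volume_name: SSGroup_test + name: OOSS_Group + storage_pool_name: Disk_Pool_1 + state: absent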
+""" +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule + +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError as err: + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except Exception: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +class SnapshotGroup(object): + def __init__(self): + + argument_spec = basic_auth_argument_spec() + argument_spec.update( + api_username=dict(type='str', required=True), + api_password=dict(type='str', required=True, no_log=True), + api_url=dict(type='str', required=True), + state=dict(required=True, choices=['present', 'absent']), + base_volume_name=dict(required=True), + name=dict(required=True), + repo_pct=dict(default=20, type='int'), + warning_threshold=dict(default=80, type='int'), + delete_limit=dict(default=30, type='int'), + full_policy=dict(default='purgepit', choices=['unknown', 'failbasewrites', 'purgepit']), + rollback_priority=dict(default='medium', choices=['highest', 'high', 'medium', 'low', 'lowest']), + storage_pool_name=dict(type='str'), + ssid=dict(required=True), + ) + + self.module = AnsibleModule(argument_spec=argument_spec) + + self.post_data = dict() + self.warning_threshold = self.module.params['warning_threshold'] + self.base_volume_name = self.module.params['base_volume_name'] + self.name = self.module.params['name'] + self.repo_pct = self.module.params['repo_pct'] + self.delete_limit = self.module.params['delete_limit'] + self.full_policy = self.module.params['full_policy'] + self.rollback_priority = self.module.params['rollback_priority'] + self.storage_pool_name = self.module.params['storage_pool_name'] + self.state = self.module.params['state'] + + self.url = self.module.params['api_url'] + self.user = self.module.params['api_username'] + self.pwd = self.module.params['api_password'] + self.certs = self.module.params['validate_certs'] + self.ssid = self.module.params['ssid'] + + if not self.url.endswith('/'): + self.url += '/' + + self.changed = False + + @property + def pool_id(self): + pools = 'storage-systems/%s/storage-pools' % self.ssid + url = self.url + pools + try: + (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd) + except Exception as err: + self.module.fail_json(msg="Snapshot group module - Failed to fetch storage pools. " + + "Id [%s]. Error [%s]." 
% (self.ssid, to_native(err))) + + for pool in data: + if pool['name'] == self.storage_pool_name: + self.pool_data = pool + return pool['id'] + + self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.storage_pool_name) + + @property + def volume_id(self): + volumes = 'storage-systems/%s/volumes' % self.ssid + url = self.url + volumes + try: + rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd, + validate_certs=self.certs) + except Exception as err: + self.module.fail_json(msg="Snapshot group module - Failed to fetch volumes. " + + "Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + # Track the matching volume explicitly so the correct volume is returned even when it is not the last one listed. + qty = 0 + base_volume = None + for volume in data: + if volume['name'] == self.base_volume_name: + qty += 1 + base_volume = volume + + if qty > 1: + self.module.fail_json(msg="More than one volume with the name: %s was found, " + "please ensure your volume has a unique name" % self.base_volume_name) + elif base_volume is None: + self.module.fail_json(msg="No volume with the name: %s, was found" % self.base_volume_name) + + self.volume = base_volume + return base_volume['id'] + + @property + def snapshot_group_id(self): + url = self.url + 'storage-systems/%s/snapshot-groups' % self.ssid + try: + rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd, + validate_certs=self.certs) + except Exception as err: + self.module.fail_json(msg="Failed to fetch snapshot groups. " + + "Id [%s]. Error [%s]." % (self.ssid, to_native(err))) + for ssg in data: + if ssg['name'] == self.name: + self.ssg_data = ssg + return ssg['id'] + + return None + + @property + def ssg_needs_update(self): + if self.ssg_data['fullWarnThreshold'] != self.warning_threshold or \ + self.ssg_data['autoDeleteLimit'] != self.delete_limit or \ + self.ssg_data['repFullPolicy'] != self.full_policy or \ + self.ssg_data['rollbackPriority'] != self.rollback_priority: + return True + else: + return False + + def create_snapshot_group(self): + self.post_data = dict( + baseMappableObjectId=self.volume_id, + name=self.name, + repositoryPercentage=self.repo_pct, + warningThreshold=self.warning_threshold, + autoDeleteLimit=self.delete_limit, + fullPolicy=self.full_policy, + storagePoolId=self.pool_id, + ) + snapshot = 'storage-systems/%s/snapshot-groups' % self.ssid + url = self.url + snapshot + try: + rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS, + url_username=self.user, url_password=self.pwd, validate_certs=self.certs) + except Exception as err: + self.module.fail_json(msg="Failed to create snapshot group. " + + "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name, + self.ssid, + to_native(err))) + + # The snapshot_group_id property re-queries the array on each access, so the new id is not cached here. + if self.ssg_needs_update: + self.update_ssg() + else: + self.module.exit_json(changed=True, **self.ssg_data) + + def update_ssg(self): + self.post_data = dict( + warningThreshold=self.warning_threshold, + autoDeleteLimit=self.delete_limit, + fullPolicy=self.full_policy, + rollbackPriority=self.rollback_priority + ) + + url = self.url + "storage-systems/%s/snapshot-groups/%s" % (self.ssid, self.snapshot_group_id) + try: + rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS, + url_username=self.user, url_password=self.pwd, validate_certs=self.certs) + except Exception as err: + self.module.fail_json(msg="Failed to update snapshot group. " + + "Snapshot group [%s]. Id [%s]. Error [%s]."
% (self.name, + self.ssid, + to_native(err))) + + def apply(self): + if self.state == 'absent': + if self.snapshot_group_id: + try: + rc, resp = request( + self.url + 'storage-systems/%s/snapshot-groups/%s' % (self.ssid, self.snapshot_group_id), + method='DELETE', headers=HEADERS, url_password=self.pwd, url_username=self.user, + validate_certs=self.certs) + except Exception as err: + self.module.fail_json(msg="Failed to delete snapshot group. " + + "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name, + self.ssid, + to_native(err))) + self.module.exit_json(changed=True, msg="Snapshot group removed", **self.ssg_data) + else: + self.module.exit_json(changed=False, msg="Snapshot group absent") + + elif self.snapshot_group_id: + if self.ssg_needs_update: + self.update_ssg() + self.module.exit_json(changed=True, **self.ssg_data) + else: + self.module.exit_json(changed=False, **self.ssg_data) + else: + self.create_snapshot_group() + + +def main(): + vg = SnapshotGroup() + vg.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_images.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_images.py new file mode 100644 index 000000000..f0ea8fb66 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_images.py @@ -0,0 +1,257 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = """ +--- +module: netapp_e_snapshot_images +short_description: NetApp E-Series create and delete snapshot images +description: + - Create and delete snapshots images on snapshot groups for NetApp E-series storage arrays. + - Only the oldest snapshot image can be deleted so consistency is preserved. + - "Related: Snapshot volumes are created from snapshot images." +version_added: '2.2' +author: Kevin Hulquest (@hulquest) +options: + ssid: + description: + - Storage system identifier + type: str + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + type: str + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + type: str + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + type: str + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + type: bool + snapshot_group: + description: + - The name of the snapshot group in which you want to create a snapshot image. + required: True + type: str + state: + description: + - Whether a new snapshot image should be created or oldest be deleted. 
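+ - C(create) always adds a new image to the group; C(remove) deletes only the group's oldest image (the lowest C(pitSequenceNumber)).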
+ required: True + type: str + choices: ['create', 'remove'] +""" +EXAMPLES = """ + - name: Create Snapshot + netapp_e_snapshot_images: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ validate_certs }}" + snapshot_group: "3300000060080E5000299C24000005B656D9F394" + state: 'create' +""" +RETURN = """ +--- + msg: + description: State of operation + type: str + returned: always + sample: "Created snapshot image" + image_id: + description: ID of snapshot image + type: str + returned: state == created + sample: "3400000060080E5000299B640063074057BC5C5E " +""" + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule + +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError as err: + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except Exception: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +def snapshot_group_from_name(module, ssid, api_url, api_pwd, api_usr, name): + snap_groups = 'storage-systems/%s/snapshot-groups' % ssid + snap_groups_url = api_url + snap_groups + (ret, snapshot_groups) = request(snap_groups_url, url_username=api_usr, url_password=api_pwd, headers=HEADERS, + validate_certs=module.params['validate_certs']) + + snapshot_group_id = None + for snapshot_group in snapshot_groups: + if name == snapshot_group['label']: + snapshot_group_id = snapshot_group['pitGroupRef'] + break + if snapshot_group_id is None: + module.fail_json(msg="Failed to lookup snapshot group. Group [%s]. Id [%s]." % (name, ssid)) + + return snapshot_group + + +def oldest_image(module, ssid, api_url, api_pwd, api_usr, name): + get_status = 'storage-systems/%s/snapshot-images' % ssid + url = api_url + get_status + + try: + (ret, images) = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS, + validate_certs=module.params['validate_certs']) + except Exception as err: + module.fail_json(msg="Failed to get snapshot images for group. Group [%s]. Id [%s]. Error [%s]" % + (name, ssid, to_native(err))) + if not images: + module.exit_json(msg="There are no snapshot images to remove. Group [%s]. Id [%s]." % (name, ssid)) + + oldest = min(images, key=lambda x: x['pitSequenceNumber']) + if oldest is None or "pitRef" not in oldest: + module.fail_json(msg="Failed to lookup oldest snapshot group. Group [%s]. Id [%s]." 
% (name, ssid)) + + return oldest + + +def create_image(module, ssid, api_url, pwd, user, p, snapshot_group): + snapshot_group_obj = snapshot_group_from_name(module, ssid, api_url, pwd, user, snapshot_group) + snapshot_group_id = snapshot_group_obj['pitGroupRef'] + endpoint = 'storage-systems/%s/snapshot-images' % ssid + url = api_url + endpoint + post_data = json.dumps({'groupId': snapshot_group_id}) + + image_data = request(url, data=post_data, method='POST', url_username=user, url_password=pwd, headers=HEADERS, + validate_certs=module.params['validate_certs']) + + if image_data[1]['status'] == 'optimal': + status = True + id = image_data[1]['id'] + else: + status = False + id = '' + + return status, id + + +def delete_image(module, ssid, api_url, pwd, user, snapshot_group): + image = oldest_image(module, ssid, api_url, pwd, user, snapshot_group) + image_id = image['pitRef'] + endpoint = 'storage-systems/%s/snapshot-images/%s' % (ssid, image_id) + url = api_url + endpoint + + try: + (ret, image_data) = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS, + validate_certs=module.params['validate_certs']) + except Exception as err: + # request() raises Exception(resp_code, data) on HTTP errors; unpack it so the status + # check below also works on Python 3, where exception objects are not indexable. + ret, image_data = err.args[0], err.args + + if ret == 204: + deleted_status = True + error_message = '' + else: + deleted_status = False + error_message = image_data[1]['errorMessage'] + + return deleted_status, error_message + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + snapshot_group=dict(required=True, type='str'), + ssid=dict(required=True, type='str'), + api_url=dict(required=True), + api_username=dict(required=False), + api_password=dict(required=False, no_log=True), + validate_certs=dict(required=False, type='bool', default=True), + state=dict(required=True, choices=['create', 'remove'], type='str'), + )) + module = AnsibleModule(argument_spec) + + p = module.params + + ssid = p.pop('ssid') + api_url = p.pop('api_url') + user = p.pop('api_username') + pwd = p.pop('api_password') + snapshot_group = p.pop('snapshot_group') + desired_state = p.pop('state') + + if not api_url.endswith('/'): + api_url += '/' + + if desired_state == 'create': + created_status, snapshot_id = create_image(module, ssid, api_url, pwd, user, p, snapshot_group) + + if created_status: + module.exit_json(changed=True, msg='Created snapshot image', image_id=snapshot_id) + else: + module.fail_json( + msg="Could not create snapshot image on system %s, in snapshot group %s" % (ssid, snapshot_group)) + else: + deleted, error_msg = delete_image(module, ssid, api_url, pwd, user, snapshot_group) + + if deleted: + module.exit_json(changed=True, msg='Deleted snapshot image for snapshot group [%s]' % (snapshot_group)) + else: + module.fail_json( + msg="Could not delete snapshot image on system %s, in snapshot group %s --- %s" % ( + ssid, snapshot_group, error_msg)) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_volume.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_volume.py new file mode 100644 index 000000000..0019d6f67 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_snapshot_volume.py @@ -0,0 +1,289 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA =
{'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = """ +--- +module: netapp_e_snapshot_volume +short_description: NetApp E-Series manage snapshot volumes. +description: + - Create, update, remove snapshot volumes for NetApp E/EF-Series storage arrays. +version_added: '2.2' +author: Kevin Hulquest (@hulquest) +notes: + - Only I(full_threshold) is supported for update operations. If the snapshot volume already exists and the threshold matches, then an C(ok) status + will be returned, no other changes can be made to a pre-existing snapshot volume. +options: + api_username: + required: true + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + type: str + api_password: + required: true + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + type: str + api_url: + required: true + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + type: str + validate_certs: + required: false + default: true + description: + - Should https certificates be validated? + type: bool + ssid: + description: + - storage array ID + type: str + required: true + snapshot_image_id: + required: True + type: str + description: + - The identifier of the snapshot image used to create the new snapshot volume. + - "Note: You'll likely want to use the M(netapp_e_facts) module to find the ID of the image you want." + full_threshold: + description: + - The repository utilization warning threshold percentage + default: 85 + type: int + name: + required: True + description: + - The name you wish to give the snapshot volume + type: str + view_mode: + required: True + type: str + description: + - The snapshot volume access mode + choices: ['readOnly', 'readWrite', 'modeUnknown', '__Undefined'] + default: 'readOnly' + repo_percentage: + description: + - The size of the view in relation to the size of the base volume + default: 20 + type: int + storage_pool_name: + description: + - Name of the storage pool on which to allocate the repository volume. + type: str + required: True + state: + description: + - Whether to create or remove the snapshot volume + required: True + type: str + choices: + - absent + - present +""" +EXAMPLES = """ + - name: Snapshot volume + netapp_e_snapshot_volume: + ssid: "{{ ssid }}" + api_url: "{{ netapp_api_url }}/" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + state: present + storage_pool_name: "{{ snapshot_volume_storage_pool_name }}" + snapshot_image_id: "{{ snapshot_volume_image_id }}" + name: "{{ snapshot_volume_name }}" +""" +RETURN = """ +msg: + description: Success message + returned: success + type: str + sample: Json facts for the volume that was created. 
+""" +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} +import json + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule + +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError as err: + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except Exception: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +class SnapshotVolume(object): + def __init__(self): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + api_username=dict(type='str', required=True), + api_password=dict(type='str', required=True, no_log=True), + api_url=dict(type='str', required=True), + ssid=dict(type='str', required=True), + snapshot_image_id=dict(type='str', required=True), + full_threshold=dict(type='int', default=85), + name=dict(type='str', required=True), + view_mode=dict(type='str', default='readOnly', + choices=['readOnly', 'readWrite', 'modeUnknown', '__Undefined']), + repo_percentage=dict(type='int', default=20), + storage_pool_name=dict(type='str', required=True), + state=dict(type='str', required=True, choices=['absent', 'present']) + )) + + self.module = AnsibleModule(argument_spec=argument_spec) + args = self.module.params + self.state = args['state'] + self.ssid = args['ssid'] + self.snapshot_image_id = args['snapshot_image_id'] + self.full_threshold = args['full_threshold'] + self.name = args['name'] + self.view_mode = args['view_mode'] + self.repo_percentage = args['repo_percentage'] + self.storage_pool_name = args['storage_pool_name'] + self.url = args['api_url'] + self.user = args['api_username'] + self.pwd = args['api_password'] + self.certs = args['validate_certs'] + + if not self.url.endswith('/'): + self.url += '/' + + @property + def pool_id(self): + pools = 'storage-systems/%s/storage-pools' % self.ssid + url = self.url + pools + (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd, + validate_certs=self.certs) + + for pool in data: + if pool['name'] == self.storage_pool_name: + self.pool_data = pool + return pool['id'] + + self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.name) + + @property + def ss_vol_exists(self): + rc, ss_vols = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid, headers=HEADERS, + url_username=self.user, url_password=self.pwd, validate_certs=self.certs) + if ss_vols: + for ss_vol in ss_vols: + if ss_vol['name'] == self.name: + self.ss_vol = ss_vol + return True + else: + return False + + return False + + @property + def ss_vol_needs_update(self): + if self.ss_vol['fullWarnThreshold'] != self.full_threshold: + return True + 
else: + return False + + def create_ss_vol(self): + post_data = dict( + snapshotImageId=self.snapshot_image_id, + fullThreshold=self.full_threshold, + name=self.name, + viewMode=self.view_mode, + repositoryPercentage=self.repo_percentage, + repositoryPoolId=self.pool_id + ) + + rc, create_resp = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid, + data=json.dumps(post_data), headers=HEADERS, url_username=self.user, + url_password=self.pwd, validate_certs=self.certs, method='POST') + + self.ss_vol = create_resp + # Doing a check after creation because the creation call fails to set the specified warning threshold + if self.ss_vol_needs_update: + self.update_ss_vol() + else: + self.module.exit_json(changed=True, **create_resp) + + def update_ss_vol(self): + post_data = dict( + fullThreshold=self.full_threshold, + ) + + rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']), + data=json.dumps(post_data), headers=HEADERS, url_username=self.user, url_password=self.pwd, + method='POST', validate_certs=self.certs) + + self.module.exit_json(changed=True, **resp) + + def remove_ss_vol(self): + rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']), + headers=HEADERS, url_username=self.user, url_password=self.pwd, validate_certs=self.certs, + method='DELETE') + self.module.exit_json(changed=True, msg="Volume successfully deleted") + + def apply(self): + if self.state == 'present': + if self.ss_vol_exists: + if self.ss_vol_needs_update: + self.update_ss_vol() + else: + self.module.exit_json(changed=False, **self.ss_vol) + else: + self.create_ss_vol() + else: + if self.ss_vol_exists: + self.remove_ss_vol() + else: + self.module.exit_json(changed=False, msg="Volume already absent") + + +def main(): + sv = SnapshotVolume() + sv.apply() + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storage_system.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storage_system.py new file mode 100644 index 000000000..a0f0d005e --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storage_system.py @@ -0,0 +1,310 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + + +DOCUMENTATION = ''' +module: netapp_e_storage_system +version_added: "2.2" +short_description: NetApp E-Series Web Services Proxy manage storage arrays +description: +- Manage the arrays accessible via a NetApp Web Services Proxy for NetApp E-series storage arrays. +options: + api_username: + description: + - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API. + type: str + required: true + api_password: + description: + - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API. + type: str + required: true + api_url: + description: + - The url to the SANtricity WebServices Proxy or embedded REST API. + required: true + type: str + validate_certs: + description: + - Should https certificates be validated? + type: bool + default: 'yes' + ssid: + description: + - The ID of the array to manage. This value must be unique for each array. 
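+ - The value is used verbatim in the proxy's REST paths, for example C(GET /storage-systems/<ssid>).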
+ type: str + required: true + state: + description: + - Whether the specified array should be configured on the Web Services Proxy or not. + required: true + type: str + choices: ['present', 'absent'] + controller_addresses: + description: + - The list of addresses for the out-of-band management adapter or the agent host. Mutually exclusive with the array_wwn parameter. + type: list + required: true + array_wwn: + description: + - The WWN of the array to manage. Only necessary if in-band managing multiple arrays on the same agent host. Mutually exclusive with the + controller_addresses parameter. + type: str + array_password: + description: + - The management password of the array to manage, if set. + type: str + enable_trace: + description: + - Enable trace logging for SYMbol calls to the storage system. + type: bool + default: 'no' + meta_tags: + description: + - Optional meta tags to associate with this storage system. + type: list + array_status_timeout_sec: + description: + - Array status timeout measured in seconds. + default: 60 + type: int +author: Kevin Hulquest (@hulquest) +''' + +EXAMPLES = ''' +--- + - name: Presence of storage system + netapp_e_storage_system: + ssid: "{{ item.key }}" + state: present + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + controller_addresses: + - "{{ item.value.address1 }}" + - "{{ item.value.address2 }}" + with_dict: "{{ storage_systems }}" + when: check_storage_system +''' + +RETURN = ''' +msg: + description: State of request + type: str + returned: always + sample: 'Storage system removed.' +''' +import json +from datetime import datetime as dt, timedelta +from time import sleep + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils._text import to_native +from ansible.module_utils.urls import open_url +from ansible.module_utils.six.moves.urllib.error import HTTPError + + +def request(url, data=None, headers=None, method='GET', use_proxy=True, + force=False, last_mod_time=None, timeout=10, validate_certs=True, + url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False): + try: + r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy, + force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs, + url_username=url_username, url_password=url_password, http_agent=http_agent, + force_basic_auth=force_basic_auth) + except HTTPError as err: + r = err.fp + + try: + raw_data = r.read() + if raw_data: + data = json.loads(raw_data) + else: + raw_data = None + except Exception: + if ignore_errors: + pass + else: + raise Exception(raw_data) + + resp_code = r.getcode() + + if resp_code >= 400 and not ignore_errors: + raise Exception(resp_code, data) + else: + return resp_code, data + + +def do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_body, timeout): + (rc, resp) = request(api_url + "/storage-systems", data=request_body, headers=post_headers, + method='POST', url_username=api_usr, url_password=api_pwd, + validate_certs=validate_certs) + status = None + return_resp = resp + if 'status' in resp: + status = resp['status'] + + if rc == 201: + status = 'neverContacted' + fail_after_time = dt.utcnow() + timedelta(seconds=timeout) + + while status == 'neverContacted': + if dt.utcnow() > fail_after_time: + raise Exception("web proxy
timed out waiting for array status") + + sleep(1) + (rc, system_resp) = request(api_url + "/storage-systems/%s" % ssid, + headers=dict(Accept="application/json"), url_username=api_usr, + url_password=api_pwd, validate_certs=validate_certs, + ignore_errors=True) + status = system_resp['status'] + return_resp = system_resp + + return status, return_resp + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(dict( + state=dict(required=True, choices=['present', 'absent']), + ssid=dict(required=True, type='str'), + controller_addresses=dict(type='list'), + array_wwn=dict(required=False, type='str'), + array_password=dict(required=False, type='str', no_log=True), + array_status_timeout_sec=dict(default=60, type='int'), + enable_trace=dict(default=False, type='bool'), + meta_tags=dict(type='list') + )) + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[['controller_addresses', 'array_wwn']], + required_if=[('state', 'present', ['controller_addresses'])] + ) + + p = module.params + + state = p['state'] + ssid = p['ssid'] + controller_addresses = p['controller_addresses'] + array_wwn = p['array_wwn'] + array_password = p['array_password'] + array_status_timeout_sec = p['array_status_timeout_sec'] + validate_certs = p['validate_certs'] + meta_tags = p['meta_tags'] + enable_trace = p['enable_trace'] + + api_usr = p['api_username'] + api_pwd = p['api_password'] + api_url = p['api_url'] + + changed = False + array_exists = False + + try: + (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, headers=dict(Accept="application/json"), + url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs, + ignore_errors=True) + except Exception as err: + module.fail_json(msg="Error accessing storage-system with id [%s]. Error [%s]" % (ssid, to_native(err))) + + array_exists = True + array_detail = resp + + if rc == 200: + if state == 'absent': + changed = True + array_exists = False + elif state == 'present': + current_addresses = frozenset(i for i in (array_detail['ip1'], array_detail['ip2']) if i) + if set(controller_addresses) != current_addresses: + changed = True + if array_detail['wwn'] != array_wwn and array_wwn is not None: + module.fail_json( + msg='It seems you may have specified a bad WWN. The storage system ID you specified, %s, currently has the WWN of %s' % + (ssid, array_detail['wwn']) + ) + elif rc == 404: + if state == 'present': + changed = True + array_exists = False + else: + changed = False + module.exit_json(changed=changed, msg="Storage system was not present.") + + if changed and not module.check_mode: + if state == 'present': + if not array_exists: + # add the array + array_add_req = dict( + id=ssid, + controllerAddresses=controller_addresses, + metaTags=meta_tags, + enableTrace=enable_trace + ) + + if array_wwn: + array_add_req['wwn'] = array_wwn + + if array_password: + array_add_req['password'] = array_password + + post_headers = dict(Accept="application/json") + post_headers['Content-Type'] = 'application/json' + request_data = json.dumps(array_add_req) + + try: + (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_data, + array_status_timeout_sec) + except Exception as err: + module.fail_json(msg="Failed to add storage system. Id[%s]. Request body [%s]. Error[%s]." % + (ssid, request_data, to_native(err))) + + else: # array exists, modify... 
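+ # removeAllTags=True presumably clears any meta tags already set on the array before + # the metaTags list below is applied (inferred from the proxy field names, not verified).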
+ post_headers = dict(Accept="application/json") + post_headers['Content-Type'] = 'application/json' + post_body = dict( + controllerAddresses=controller_addresses, + removeAllTags=True, + enableTrace=enable_trace, + metaTags=meta_tags + ) + + try: + (rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, post_body, + array_status_timeout_sec) + except Exception as err: + module.fail_json(msg="Failed to update storage system. Id[%s]. Request body [%s]. Error[%s]." % + (ssid, post_body, to_native(err))) + + elif state == 'absent': + # delete the array + try: + (rc, resp) = request(api_url + "/storage-systems/%s" % ssid, method='DELETE', + url_username=api_usr, + url_password=api_pwd, validate_certs=validate_certs) + except Exception as err: + module.fail_json(msg="Failed to remove storage array. Id[%s]. Error[%s]." % (ssid, to_native(err))) + + if rc == 422: + module.exit_json(changed=changed, msg="Storage system was not present.") + if rc == 204: + module.exit_json(changed=changed, msg="Storage system removed.") + + module.exit_json(changed=changed, **resp) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storagepool.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storagepool.py new file mode 100644 index 000000000..5c74a415b --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_storagepool.py @@ -0,0 +1,941 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {"metadata_version": "1.1", + "status": ["deprecated"], + "supported_by": "community"} + +DOCUMENTATION = """ +--- +module: netapp_e_storagepool +short_description: NetApp E-Series manage volume groups and disk pools +description: Create or remove volume groups and disk pools for NetApp E-series storage arrays. +version_added: '2.2' +author: + - Kevin Hulquest (@hulquest) + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.netapp.eseries +options: + state: + description: + - Whether the specified storage pool should exist or not. + - Note that removing a storage pool currently requires the removal of all defined volumes first. + required: true + type: str + choices: ["present", "absent"] + name: + description: + - The name of the storage pool to manage + type: str + required: true + criteria_drive_count: + description: + - The number of disks to use for building the storage pool. + - When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified. + - The pool will be expanded if this number exceeds the number of disks already in place (See expansion note below) + required: false + type: int + criteria_min_usable_capacity: + description: + - The minimum size of the storage pool (in size_unit). + - When I(state=="present") then I(criteria_drive_count) or I(criteria_min_usable_capacity) must be specified. + - The pool will be expanded if this value exceeds its current size. (See expansion note below) + required: false + type: float + criteria_drive_type: + description: + - The type of disk (hdd or ssd) to use when searching for candidates to use.
+ - When not specified each drive type will be evaluated until successful drive candidates are found starting with + the most prevalent drive type. + required: false + type: str + choices: ["hdd","ssd"] + criteria_size_unit: + description: + - The unit used to interpret size parameters + choices: ["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"] + type: str + default: "gb" + criteria_drive_min_size: + description: + - The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool. + type: float + criteria_drive_interface_type: + description: + - The interface type to use when selecting drives for the storage pool + - If not provided then all interface types will be considered. + choices: ["sas", "sas4k", "fibre", "fibre520b", "scsi", "sata", "pata"] + type: str + required: false + criteria_drive_require_da: + description: + - Ensures the storage pool will be created with only data assurance (DA) capable drives. + - Only available for new storage pools; existing storage pools cannot be converted. + default: false + type: bool + version_added: '2.9' + criteria_drive_require_fde: + description: + - Whether full disk encryption ability is required for drives to be added to the storage pool + default: false + type: bool + raid_level: + description: + - The RAID level of the storage pool to be created. + - Required only when I(state=="present"). + - When I(raid_level=="raidDiskPool") then I(criteria_drive_count >= 10 or criteria_drive_count >= 11) is required + depending on the storage array specifications. + - When I(raid_level=="raid0") then I(1<=criteria_drive_count) is required. + - When I(raid_level=="raid1") then I(2<=criteria_drive_count) is required. + - When I(raid_level=="raid3") then I(3<=criteria_drive_count<=30) is required. + - When I(raid_level=="raid5") then I(3<=criteria_drive_count<=30) is required. + - When I(raid_level=="raid6") then I(5<=criteria_drive_count<=30) is required. + - Note that raidAll will be treated as raidDiskPool and raid3 as raid5. + required: false + choices: ["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"] + type: str + default: "raidDiskPool" + secure_pool: + description: + - Enables security at rest feature on the storage pool. + - Will only work if all drives in the pool are security capable (FDE, FIPS, or mix) + - Warning, once security is enabled it is impossible to disable without erasing the drives. + required: false + type: bool + reserve_drive_count: + description: + - Set the number of drives reserved by the storage pool for reconstruction operations. + - Only valid on raid disk pools. + type: int + required: false + remove_volumes: + description: + - Prior to removing a storage pool, delete all volumes in the pool. + default: true + type: bool + erase_secured_drives: + description: + - If I(state=="absent") then all storage pool drives will be erase + - If I(state=="present") then delete all available storage array drives that have security enabled. + default: true + type: bool +notes: + - The expansion operations are non-blocking due to the time consuming nature of expanding volume groups + - Traditional volume groups (raid0, raid1, raid5, raid6) are performed in steps dictated by the storage array. Each + required step will be attempted until the request fails which is likely because of the required expansion time. + - raidUnsupported will be treated as raid0, raidAll as raidDiskPool and raid3 as raid5. 
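+ - For example, requesting I(raid_level=raid1) with an odd I(criteria_drive_count) will fail, because RAID 1 requires an even number of drives.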
+ - Tray loss protection and drawer loss protection will be chosen if at all possible. +""" +EXAMPLES = """ +- name: No disk groups + netapp_e_storagepool: + ssid: "{{ ssid }}" + name: "{{ item }}" + state: absent + api_url: "{{ netapp_api_url }}" + api_username: "{{ netapp_api_username }}" + api_password: "{{ netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" +""" +RETURN = """ +msg: + description: Success message + returned: success + type: str + sample: Json facts for the pool that was created. +""" +import functools +from itertools import groupby +from time import sleep +from pprint import pformat +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule +from ansible.module_utils._text import to_native + + +def get_most_common_elements(iterator): + """Returns a generator containing a descending list of most common elements.""" + if not isinstance(iterator, list): + raise TypeError("iterator must be a list.") + + grouped = [(key, len(list(group))) for key, group in groupby(sorted(iterator))] + return sorted(grouped, key=lambda x: x[1], reverse=True) + + +def memoize(func): + """Generic memoizer for any function with any number of arguments including zero.""" + + @functools.wraps(func) + def wrapper(*args, **kwargs): + class MemoizeFuncArgs(dict): + def __missing__(self, _key): + self[_key] = func(*args, **kwargs) + return self[_key] + + key = str((args, kwargs)) if args and kwargs else "no_argument_response" + return MemoizeFuncArgs().__getitem__(key) + + return wrapper + + +class NetAppESeriesStoragePool(NetAppESeriesModule): + EXPANSION_TIMEOUT_SEC = 10 + DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT = 11 + + def __init__(self): + version = "02.00.0000.0000" + ansible_options = dict( + state=dict(required=True, choices=["present", "absent"], type="str"), + name=dict(required=True, type="str"), + criteria_size_unit=dict(choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"], + default="gb", type="str"), + criteria_drive_count=dict(type="int"), + criteria_drive_interface_type=dict(choices=["sas", "sas4k", "fibre", "fibre520b", "scsi", "sata", "pata"], + type="str"), + criteria_drive_type=dict(choices=["ssd", "hdd"], type="str", required=False), + criteria_drive_min_size=dict(type="float"), + criteria_drive_require_da=dict(type="bool", required=False), + criteria_drive_require_fde=dict(type="bool", required=False), + criteria_min_usable_capacity=dict(type="float"), + raid_level=dict(choices=["raidAll", "raid0", "raid1", "raid3", "raid5", "raid6", "raidDiskPool"], + default="raidDiskPool"), + erase_secured_drives=dict(type="bool", default=True), + secure_pool=dict(type="bool", default=False), + reserve_drive_count=dict(type="int"), + remove_volumes=dict(type="bool", default=True)) + + required_if = [["state", "present", ["raid_level"]]] + super(NetAppESeriesStoragePool, self).__init__(ansible_options=ansible_options, + web_services_version=version, + supports_check_mode=True, + required_if=required_if) + + args = self.module.params + self.state = args["state"] + self.ssid = args["ssid"] + self.name = args["name"] + self.criteria_drive_count = args["criteria_drive_count"] + self.criteria_min_usable_capacity = args["criteria_min_usable_capacity"] + self.criteria_size_unit = args["criteria_size_unit"] + self.criteria_drive_min_size = args["criteria_drive_min_size"] + self.criteria_drive_type = args["criteria_drive_type"] + self.criteria_drive_interface_type = args["criteria_drive_interface_type"] + 
self.criteria_drive_require_fde = args["criteria_drive_require_fde"] + self.criteria_drive_require_da = args["criteria_drive_require_da"] + self.raid_level = args["raid_level"] + self.erase_secured_drives = args["erase_secured_drives"] + self.secure_pool = args["secure_pool"] + self.reserve_drive_count = args["reserve_drive_count"] + self.remove_volumes = args["remove_volumes"] + self.pool_detail = None + + # Change all sizes to be measured in bytes + if self.criteria_min_usable_capacity: + self.criteria_min_usable_capacity = int(self.criteria_min_usable_capacity * + self.SIZE_UNIT_MAP[self.criteria_size_unit]) + if self.criteria_drive_min_size: + self.criteria_drive_min_size = int(self.criteria_drive_min_size * + self.SIZE_UNIT_MAP[self.criteria_size_unit]) + self.criteria_size_unit = "bytes" + + # Adjust unused raid level option to reflect documentation + if self.raid_level == "raidAll": + self.raid_level = "raidDiskPool" + if self.raid_level == "raid3": + self.raid_level = "raid5" + + @property + @memoize + def available_drives(self): + """Determine the list of available drives""" + return [drive["id"] for drive in self.drives if drive["available"] and drive["status"] == "optimal"] + + @property + @memoize + def available_drive_types(self): + """Determine the types of available drives sorted by the most common first.""" + types = [drive["driveMediaType"] for drive in self.drives] + return [entry[0] for entry in get_most_common_elements(types)] + + @property + @memoize + def available_drive_interface_types(self): + """Determine the types of available drives.""" + interfaces = [drive["phyDriveType"] for drive in self.drives] + return [entry[0] for entry in get_most_common_elements(interfaces)] + + @property + def storage_pool_drives(self): + """Retrieve list of drives found in storage pool.""" + return [drive for drive in self.drives if drive["currentVolumeGroupRef"] == self.pool_detail["id"] and not drive["hotSpare"]] + + @property + def expandable_drive_count(self): + """Maximum number of drives that a storage pool can be expanded at a given time.""" + capabilities = None + if self.raid_level == "raidDiskPool": + return len(self.available_drives) + + try: + rc, capabilities = self.request("storage-systems/%s/capabilities" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to fetch maximum expandable drive count. Array id [%s]. Error[%s]." + % (self.ssid, to_native(error))) + + return capabilities["featureParameters"]["maxDCEDrives"] + + @property + def disk_pool_drive_minimum(self): + """Provide the storage array's minimum disk pool drive count.""" + rc, attr = self.request("storage-systems/%s/symbol/getSystemAttributeDefaults" % self.ssid, ignore_errors=True) + + # Standard minimum is 11 drives but some allow 10 drives. 
11 will be the default
+        if (rc != 200 or "minimumDriveCount" not in attr["defaults"]["diskPoolDefaultAttributes"].keys() or
+                attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"] == 0):
+            return self.DEFAULT_DISK_POOL_MINIMUM_DISK_COUNT
+
+        return attr["defaults"]["diskPoolDefaultAttributes"]["minimumDriveCount"]
+
+    def get_available_drive_capacities(self, drive_id_list=None):
+        """Determine the list of available drive capacities."""
+        if drive_id_list:
+            available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
+                                              if drive["id"] in drive_id_list and drive["available"] and
+                                              drive["status"] == "optimal"])
+        else:
+            available_drive_capacities = set([int(drive["usableCapacity"]) for drive in self.drives
+                                              if drive["available"] and drive["status"] == "optimal"])
+
+        self.module.log("available drive capacities: %s" % available_drive_capacities)
+        return list(available_drive_capacities)
+
+    @property
+    def drives(self):
+        """Retrieve the list of all drives in the storage system."""
+        drives = None
+        try:
+            rc, drives = self.request("storage-systems/%s/drives" % self.ssid)
+        except Exception as error:
+            self.module.fail_json(msg="Failed to fetch disk drives. Array id [%s]. Error[%s]."
+                                  % (self.ssid, to_native(error)))
+
+        return drives
+
+    def is_drive_count_valid(self, drive_count):
+        """Validate that the drive count criteria are met."""
+        if self.criteria_drive_count and drive_count < self.criteria_drive_count:
+            return False
+
+        if self.raid_level == "raidDiskPool":
+            return drive_count >= self.disk_pool_drive_minimum
+        if self.raid_level == "raid0":
+            return drive_count > 0
+        if self.raid_level == "raid1":
+            return drive_count >= 2 and (drive_count % 2) == 0
+        if self.raid_level in ["raid3", "raid5"]:
+            return 3 <= drive_count <= 30
+        if self.raid_level == "raid6":
+            return 5 <= drive_count <= 30
+        return False
+
+    @property
+    def storage_pool(self):
+        """Retrieve storage pool information."""
+        storage_pools_resp = None
+        try:
+            rc, storage_pools_resp = self.request("storage-systems/%s/storage-pools" % self.ssid)
+        except Exception as err:
+            self.module.fail_json(msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]."
+                                  % (self.ssid, to_native(err), self.state))
+
+        pool_detail = [pool for pool in storage_pools_resp if pool["name"] == self.name]
+        return pool_detail[0] if pool_detail else dict()
+
+    @property
+    def storage_pool_volumes(self):
+        """Retrieve list of volumes associated with storage pool."""
+        volumes_resp = None
+        try:
+            rc, volumes_resp = self.request("storage-systems/%s/volumes" % self.ssid)
+        except Exception as err:
+            self.module.fail_json(msg="Failed to get volumes. Array id [%s]. Error[%s]. State[%s]."
+ % (self.ssid, to_native(err), self.state)) + + group_ref = self.storage_pool["volumeGroupRef"] + storage_pool_volume_list = [volume["id"] for volume in volumes_resp if volume["volumeGroupRef"] == group_ref] + return storage_pool_volume_list + + def get_ddp_capacity(self, expansion_drive_list): + """Return the total usable capacity based on the additional drives.""" + + def get_ddp_error_percent(_drive_count, _extent_count): + """Determine the space reserved for reconstruction""" + if _drive_count <= 36: + if _extent_count <= 600: + return 0.40 + elif _extent_count <= 1400: + return 0.35 + elif _extent_count <= 6200: + return 0.20 + elif _extent_count <= 50000: + return 0.15 + elif _drive_count <= 64: + if _extent_count <= 600: + return 0.20 + elif _extent_count <= 1400: + return 0.15 + elif _extent_count <= 6200: + return 0.10 + elif _extent_count <= 50000: + return 0.05 + elif _drive_count <= 480: + if _extent_count <= 600: + return 0.20 + elif _extent_count <= 1400: + return 0.15 + elif _extent_count <= 6200: + return 0.10 + elif _extent_count <= 50000: + return 0.05 + + self.module.fail_json(msg="Drive count exceeded the error percent table. Array[%s]" % self.ssid) + + def get_ddp_reserved_drive_count(_disk_count): + """Determine the number of reserved drive.""" + reserve_count = 0 + + if self.reserve_drive_count: + reserve_count = self.reserve_drive_count + elif _disk_count >= 256: + reserve_count = 8 + elif _disk_count >= 192: + reserve_count = 7 + elif _disk_count >= 128: + reserve_count = 6 + elif _disk_count >= 64: + reserve_count = 4 + elif _disk_count >= 32: + reserve_count = 3 + elif _disk_count >= 12: + reserve_count = 2 + elif _disk_count == 11: + reserve_count = 1 + + return reserve_count + + if self.pool_detail: + drive_count = len(self.storage_pool_drives) + len(expansion_drive_list) + else: + drive_count = len(expansion_drive_list) + + drive_usable_capacity = min(min(self.get_available_drive_capacities()), + min(self.get_available_drive_capacities(expansion_drive_list))) + drive_data_extents = ((drive_usable_capacity - 8053063680) / 536870912) + maximum_stripe_count = (drive_count * drive_data_extents) / 10 + + error_percent = get_ddp_error_percent(drive_count, drive_data_extents) + error_overhead = (drive_count * drive_data_extents / 10 * error_percent + 10) / 10 + + total_stripe_count = maximum_stripe_count - error_overhead + stripe_count_per_drive = total_stripe_count / drive_count + reserved_stripe_count = get_ddp_reserved_drive_count(drive_count) * stripe_count_per_drive + available_stripe_count = total_stripe_count - reserved_stripe_count + + return available_stripe_count * 4294967296 + + @memoize + def get_candidate_drives(self): + """Retrieve set of drives candidates for creating a new storage pool.""" + + def get_candidate_drive_request(): + """Perform request for new volume creation.""" + candidates_list = list() + drive_types = [self.criteria_drive_type] if self.criteria_drive_type else self.available_drive_types + interface_types = [self.criteria_drive_interface_type] \ + if self.criteria_drive_interface_type else self.available_drive_interface_types + + for interface_type in interface_types: + for drive_type in drive_types: + candidates = None + volume_candidate_request_data = dict( + type="diskPool" if self.raid_level == "raidDiskPool" else "traditional", + diskPoolVolumeCandidateRequestData=dict( + reconstructionReservedDriveCount=65535)) + candidate_selection_type = dict( + candidateSelectionType="count", + 
driveRefList=dict(driveRef=self.available_drives)) + criteria = dict(raidLevel=self.raid_level, + phyDriveType=interface_type, + dssPreallocEnabled=False, + securityType="capable" if self.criteria_drive_require_fde else "none", + driveMediaType=drive_type, + onlyProtectionInformationCapable=True if self.criteria_drive_require_da else False, + volumeCandidateRequestData=volume_candidate_request_data, + allocateReserveSpace=False, + securityLevel="fde" if self.criteria_drive_require_fde else "none", + candidateSelectionType=candidate_selection_type) + + try: + rc, candidates = self.request("storage-systems/%s/symbol/getVolumeCandidates?verboseError" + "Response=true" % self.ssid, data=criteria, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]." + % (self.ssid, to_native(error))) + + if candidates: + candidates_list.extend(candidates["volumeCandidate"]) + + # Sort output based on tray and then drawer protection first + tray_drawer_protection = list() + tray_protection = list() + drawer_protection = list() + no_protection = list() + sorted_candidates = list() + for item in candidates_list: + if item["trayLossProtection"]: + if item["drawerLossProtection"]: + tray_drawer_protection.append(item) + else: + tray_protection.append(item) + elif item["drawerLossProtection"]: + drawer_protection.append(item) + else: + no_protection.append(item) + + if tray_drawer_protection: + sorted_candidates.extend(tray_drawer_protection) + if tray_protection: + sorted_candidates.extend(tray_protection) + if drawer_protection: + sorted_candidates.extend(drawer_protection) + if no_protection: + sorted_candidates.extend(no_protection) + + return sorted_candidates + + # Determine the appropriate candidate list + for candidate in get_candidate_drive_request(): + + # Evaluate candidates for required drive count, collective drive usable capacity and minimum drive size + if self.criteria_drive_count: + if self.criteria_drive_count != int(candidate["driveCount"]): + continue + if self.criteria_min_usable_capacity: + if ((self.raid_level == "raidDiskPool" and self.criteria_min_usable_capacity > + self.get_ddp_capacity(candidate["driveRefList"]["driveRef"])) or + self.criteria_min_usable_capacity > int(candidate["usableSize"])): + continue + if self.criteria_drive_min_size: + if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["driveRefList"]["driveRef"])): + continue + + return candidate + + self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid) + + @memoize + def get_expansion_candidate_drives(self): + """Retrieve required expansion drive list. + + Note: To satisfy the expansion criteria each item in the candidate list must added specified group since there + is a potential limitation on how many drives can be incorporated at a time. + * Traditional raid volume groups must be added two drives maximum at a time. No limits on raid disk pools. + + :return list(candidate): list of candidate structures from the getVolumeGroupExpansionCandidates symbol endpoint + """ + + def get_expansion_candidate_drive_request(): + """Perform the request for expanding existing volume groups or disk pools. + + Note: the list of candidate structures do not necessarily produce candidates that meet all criteria. 
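+
+            As a sketch (shape inferred only from the fields read below, namely "candidates"
+            and each candidate's "drives" and "usableCapacity"; values are hypothetical):
+
+                {"candidates": [{"drives": ["0100000050000001", "0100000050000002"],
+                                 "usableCapacity": "1099511627776"}]}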
+ """ + candidates_list = None + url = "storage-systems/%s/symbol/getVolumeGroupExpansionCandidates?verboseErrorResponse=true" % self.ssid + if self.raid_level == "raidDiskPool": + url = "storage-systems/%s/symbol/getDiskPoolExpansionCandidates?verboseErrorResponse=true" % self.ssid + + try: + rc, candidates_list = self.request(url, method="POST", data=self.pool_detail["id"]) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve volume candidates. Array [%s]. Error [%s]." + % (self.ssid, to_native(error))) + + return candidates_list["candidates"] + + required_candidate_list = list() + required_additional_drives = 0 + required_additional_capacity = 0 + total_required_capacity = 0 + + # determine whether and how much expansion is need to satisfy the specified criteria + if self.criteria_min_usable_capacity: + total_required_capacity = self.criteria_min_usable_capacity + required_additional_capacity = self.criteria_min_usable_capacity - int(self.pool_detail["totalRaidedSpace"]) + + if self.criteria_drive_count: + required_additional_drives = self.criteria_drive_count - len(self.storage_pool_drives) + + # Determine the appropriate expansion candidate list + if required_additional_drives > 0 or required_additional_capacity > 0: + for candidate in get_expansion_candidate_drive_request(): + + if self.criteria_drive_min_size: + if self.criteria_drive_min_size > min(self.get_available_drive_capacities(candidate["drives"])): + continue + + if self.raid_level == "raidDiskPool": + if (len(candidate["drives"]) >= required_additional_drives and + self.get_ddp_capacity(candidate["drives"]) >= total_required_capacity): + required_candidate_list.append(candidate) + break + else: + required_additional_drives -= len(candidate["drives"]) + required_additional_capacity -= int(candidate["usableCapacity"]) + required_candidate_list.append(candidate) + + # Determine if required drives and capacities are satisfied + if required_additional_drives <= 0 and required_additional_capacity <= 0: + break + else: + self.module.fail_json(msg="Not enough drives to meet the specified criteria. Array [%s]." % self.ssid) + + return required_candidate_list + + def get_reserve_drive_count(self): + """Retrieve the current number of reserve drives for raidDiskPool (Only for raidDiskPool).""" + + if not self.pool_detail: + self.module.fail_json(msg="The storage pool must exist. Array [%s]." % self.ssid) + + if self.raid_level != "raidDiskPool": + self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]." + % (self.pool_detail["id"], self.ssid)) + + return self.pool_detail["volumeGroupData"]["diskPoolData"]["reconstructionReservedDriveCount"] + + def get_maximum_reserve_drive_count(self): + """Retrieve the maximum number of reserve drives for storage pool (Only for raidDiskPool).""" + if self.raid_level != "raidDiskPool": + self.module.fail_json(msg="The storage pool must be a raidDiskPool. Pool [%s]. Array [%s]." 
+ % (self.pool_detail["id"], self.ssid)) + + drives_ids = list() + + if self.pool_detail: + drives_ids.extend(self.storage_pool_drives) + for candidate in self.get_expansion_candidate_drives(): + drives_ids.extend((candidate["drives"])) + else: + candidate = self.get_candidate_drives() + drives_ids.extend(candidate["driveRefList"]["driveRef"]) + + drive_count = len(drives_ids) + maximum_reserve_drive_count = min(int(drive_count * 0.2 + 1), drive_count - 10) + if maximum_reserve_drive_count > 10: + maximum_reserve_drive_count = 10 + + return maximum_reserve_drive_count + + def set_reserve_drive_count(self, check_mode=False): + """Set the reserve drive count for raidDiskPool.""" + changed = False + + if self.raid_level == "raidDiskPool" and self.reserve_drive_count: + maximum_count = self.get_maximum_reserve_drive_count() + + if self.reserve_drive_count < 0 or self.reserve_drive_count > maximum_count: + self.module.fail_json(msg="Supplied reserve drive count is invalid or exceeds the maximum allowed. " + "Note that it may be necessary to wait for expansion operations to complete " + "before the adjusting the reserve drive count. Maximum [%s]. Array [%s]." + % (maximum_count, self.ssid)) + + if self.reserve_drive_count != self.get_reserve_drive_count(): + changed = True + + if not check_mode: + try: + rc, resp = self.request("storage-systems/%s/symbol/setDiskPoolReservedDriveCount" % self.ssid, + method="POST", data=dict(volumeGroupRef=self.pool_detail["id"], + newDriveCount=self.reserve_drive_count)) + except Exception as error: + self.module.fail_json(msg="Failed to set reserve drive count for disk pool. Disk Pool [%s]." + " Array [%s]." % (self.pool_detail["id"], self.ssid)) + + return changed + + def erase_all_available_secured_drives(self, check_mode=False): + """Erase all available drives that have encryption at rest feature enabled.""" + changed = False + drives_list = list() + for drive in self.drives: + if drive["available"] and drive["fdeEnabled"]: + changed = True + drives_list.append(drive["id"]) + + if drives_list and not check_mode: + try: + rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true" + % self.ssid, method="POST", data=dict(driveRef=drives_list)) + except Exception as error: + self.module.fail_json(msg="Failed to erase all secured drives. Array [%s]" % self.ssid) + + return changed + + def create_storage_pool(self): + """Create new storage pool.""" + url = "storage-systems/%s/symbol/createVolumeGroup?verboseErrorResponse=true" % self.ssid + request_body = dict(label=self.name, + candidate=self.get_candidate_drives()) + + if self.raid_level == "raidDiskPool": + url = "storage-systems/%s/symbol/createDiskPool?verboseErrorResponse=true" % self.ssid + + request_body.update( + dict(backgroundOperationPriority="useDefault", + criticalReconstructPriority="useDefault", + degradedReconstructPriority="useDefault", + poolUtilizationCriticalThreshold=65535, + poolUtilizationWarningThreshold=0)) + + if self.reserve_drive_count: + request_body.update(dict(volumeCandidateData=dict( + diskPoolVolumeCandidateData=dict(reconstructionReservedDriveCount=self.reserve_drive_count)))) + + try: + rc, resp = self.request(url, method="POST", data=request_body) + except Exception as error: + self.module.fail_json(msg="Failed to create storage pool. Array id [%s]. Error[%s]." 
+ % (self.ssid, to_native(error))) + + # Update drive and storage pool information + self.pool_detail = self.storage_pool + + def delete_storage_pool(self): + """Delete storage pool.""" + storage_pool_drives = [drive["id"] for drive in self.storage_pool_drives if drive["fdeEnabled"]] + try: + delete_volumes_parameter = "?delete-volumes=true" if self.remove_volumes else "" + rc, resp = self.request("storage-systems/%s/storage-pools/%s%s" + % (self.ssid, self.pool_detail["id"], delete_volumes_parameter), method="DELETE") + except Exception as error: + self.module.fail_json(msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]." + % (self.pool_detail["id"], self.ssid, to_native(error))) + + if storage_pool_drives and self.erase_secured_drives: + try: + rc, resp = self.request("storage-systems/%s/symbol/reprovisionDrive?verboseErrorResponse=true" + % self.ssid, method="POST", data=dict(driveRef=storage_pool_drives)) + except Exception as error: + self.module.fail_json(msg="Failed to erase drives prior to creating new storage pool. Array [%s]." + " Error [%s]." % (self.ssid, to_native(error))) + + def secure_storage_pool(self, check_mode=False): + """Enable security on an existing storage pool""" + self.pool_detail = self.storage_pool + needs_secure_pool = False + + if not self.secure_pool and self.pool_detail["securityType"] == "enabled": + self.module.fail_json(msg="It is not possible to disable storage pool security! See array documentation.") + if self.secure_pool and self.pool_detail["securityType"] != "enabled": + needs_secure_pool = True + + if needs_secure_pool and not check_mode: + try: + rc, resp = self.request("storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail["id"]), + data=dict(securePool=True), method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to secure storage pool. Pool id [%s]. Array [%s]. Error" + " [%s]." % (self.pool_detail["id"], self.ssid, to_native(error))) + + self.pool_detail = self.storage_pool + return needs_secure_pool + + def migrate_raid_level(self, check_mode=False): + """Request storage pool raid level migration.""" + needs_migration = self.raid_level != self.pool_detail["raidLevel"] + if needs_migration and self.pool_detail["raidLevel"] == "raidDiskPool": + self.module.fail_json(msg="Raid level cannot be changed for disk pools") + + if needs_migration and not check_mode: + sp_raid_migrate_req = dict(raidLevel=self.raid_level) + + try: + rc, resp = self.request("storage-systems/%s/storage-pools/%s/raid-type-migration" + % (self.ssid, self.name), data=sp_raid_migrate_req, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to change the raid level of storage pool. Array id [%s]." + " Error[%s]." % (self.ssid, to_native(error))) + + self.pool_detail = self.storage_pool + return needs_migration + + def expand_storage_pool(self, check_mode=False): + """Add drives to existing storage pool. 
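+
+        Candidate drives are submitted in batches sized by expandable_drive_count. As a
+        hypothetical illustration, with expandable_drive_count == 2, three two-drive
+        candidates are applied through three sequential expansion requests, waiting for
+        each request to finish before issuing the next.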
+ + :return bool: whether drives were required to be added to satisfy the specified criteria.""" + expansion_candidate_list = self.get_expansion_candidate_drives() + changed_required = bool(expansion_candidate_list) + estimated_completion_time = 0.0 + + # build expandable groupings of traditional raid candidate + required_expansion_candidate_list = list() + while expansion_candidate_list: + subset = list() + while expansion_candidate_list and len(subset) < self.expandable_drive_count: + subset.extend(expansion_candidate_list.pop()["drives"]) + required_expansion_candidate_list.append(subset) + + if required_expansion_candidate_list and not check_mode: + url = "storage-systems/%s/symbol/startVolumeGroupExpansion?verboseErrorResponse=true" % self.ssid + if self.raid_level == "raidDiskPool": + url = "storage-systems/%s/symbol/startDiskPoolExpansion?verboseErrorResponse=true" % self.ssid + + while required_expansion_candidate_list: + candidate_drives_list = required_expansion_candidate_list.pop() + request_body = dict(volumeGroupRef=self.pool_detail["volumeGroupRef"], + driveRef=candidate_drives_list) + try: + rc, resp = self.request(url, method="POST", data=request_body) + except Exception as error: + rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress" + % (self.ssid, self.pool_detail["id"]), ignore_errors=True) + if rc == 200 and actions_resp: + actions = [action["currentAction"] for action in actions_resp + if action["volumeRef"] in self.storage_pool_volumes] + self.module.fail_json(msg="Failed to add drives to the storage pool possibly because of actions" + " in progress. Actions [%s]. Pool id [%s]. Array id [%s]. Error[%s]." + % (", ".join(actions), self.pool_detail["id"], self.ssid, + to_native(error))) + + self.module.fail_json(msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]." + " Error[%s]." 
% (self.pool_detail["id"], self.ssid, to_native(error))) + + # Wait for expansion completion unless it is the last request in the candidate list + if required_expansion_candidate_list: + for dummy in range(self.EXPANSION_TIMEOUT_SEC): + rc, actions_resp = self.request("storage-systems/%s/storage-pools/%s/action-progress" + % (self.ssid, self.pool_detail["id"]), ignore_errors=True) + if rc == 200: + for action in actions_resp: + if (action["volumeRef"] in self.storage_pool_volumes and + action["currentAction"] == "remappingDce"): + sleep(1) + estimated_completion_time = action["estimatedTimeToCompletion"] + break + else: + estimated_completion_time = 0.0 + break + + return changed_required, estimated_completion_time + + def apply(self): + """Apply requested state to storage array.""" + changed = False + + if self.state == "present": + if self.criteria_drive_count is None and self.criteria_min_usable_capacity is None: + self.module.fail_json(msg="One of criteria_min_usable_capacity or criteria_drive_count must be" + " specified.") + if self.criteria_drive_count and not self.is_drive_count_valid(self.criteria_drive_count): + self.module.fail_json(msg="criteria_drive_count must be valid for the specified raid level.") + + self.pool_detail = self.storage_pool + self.module.log(pformat(self.pool_detail)) + + if self.state == "present" and self.erase_secured_drives: + self.erase_all_available_secured_drives(check_mode=True) + + # Determine whether changes need to be applied to the storage array + if self.pool_detail: + + if self.state == "absent": + changed = True + + elif self.state == "present": + + if self.criteria_drive_count and self.criteria_drive_count < len(self.storage_pool_drives): + self.module.fail_json(msg="Failed to reduce the size of the storage pool. Array [%s]. Pool [%s]." + % (self.ssid, self.pool_detail["id"])) + + if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail["driveMediaType"]: + self.module.fail_json(msg="Failed! It is not possible to modify storage pool media type." + " Array [%s]. Pool [%s]." % (self.ssid, self.pool_detail["id"])) + + if (self.criteria_drive_require_da is not None and self.criteria_drive_require_da != + self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"]): + self.module.fail_json(msg="Failed! It is not possible to modify DA-capability. Array [%s]." + " Pool [%s]." % (self.ssid, self.pool_detail["id"])) + + # Evaluate current storage pool for required change. + needs_expansion, estimated_completion_time = self.expand_storage_pool(check_mode=True) + if needs_expansion: + changed = True + if self.migrate_raid_level(check_mode=True): + changed = True + if self.secure_storage_pool(check_mode=True): + changed = True + if self.set_reserve_drive_count(check_mode=True): + changed = True + + elif self.state == "present": + changed = True + + # Apply changes to storage array + msg = "No changes were required for the storage pool [%s]." + if changed and not self.module.check_mode: + if self.state == "present": + if self.erase_secured_drives: + self.erase_all_available_secured_drives() + + if self.pool_detail: + change_list = list() + + # Expansion needs to occur before raid level migration to account for any sizing needs. 
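+                    # expand_storage_pool() reports a (changed, estimated_completion_time)
+                    # tuple; the estimate only annotates the exit message below when an
+                    # expansion was actually started.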
+ expanded, estimated_completion_time = self.expand_storage_pool() + if expanded: + change_list.append("expanded") + if self.migrate_raid_level(): + change_list.append("raid migration") + if self.secure_storage_pool(): + change_list.append("secured") + if self.set_reserve_drive_count(): + change_list.append("adjusted reserve drive count") + + if change_list: + msg = "Following changes have been applied to the storage pool [%s]: " + ", ".join(change_list) + + if expanded: + msg += "\nThe expansion operation will complete in an estimated %s minutes."\ + % estimated_completion_time + else: + self.create_storage_pool() + msg = "Storage pool [%s] was created." + + if self.secure_storage_pool(): + msg = "Storage pool [%s] was created and secured." + if self.set_reserve_drive_count(): + msg += " Adjusted reserve drive count." + + elif self.pool_detail: + self.delete_storage_pool() + msg = "Storage pool [%s] removed." + + self.pool_detail = self.storage_pool + self.module.log(pformat(self.pool_detail)) + self.module.log(msg % self.name) + self.module.exit_json(msg=msg % self.name, changed=changed, **self.pool_detail) + + +def main(): + storage_pool = NetAppESeriesStoragePool() + storage_pool.apply() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_syslog.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_syslog.py new file mode 100644 index 000000000..1e6e85886 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_syslog.py @@ -0,0 +1,286 @@ +#!/usr/bin/python + +# (c) 2018, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = """ +--- +module: netapp_e_syslog +short_description: NetApp E-Series manage syslog settings +description: + - Allow the syslog settings to be configured for an individual E-Series storage-system +version_added: '2.7' +author: Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.netapp.eseries +options: + state: + description: + - Add or remove the syslog server configuration for E-Series storage array. + - Existing syslog server configuration will be removed or updated when its address matches I(address). + - Fully qualified hostname that resolve to an IPv4 address that matches I(address) will not be + treated as a match. + choices: + - present + - absent + type: str + default: present + address: + description: + - The syslog server's IPv4 address or a fully qualified hostname. + - All existing syslog configurations will be removed when I(state=absent) and I(address=None). + type: str + port: + description: + - This is the port the syslog server is using. + default: 514 + type: int + protocol: + description: + - This is the transmission protocol the syslog server's using to receive syslog messages. + choices: + - udp + - tcp + - tls + default: udp + type: str + components: + description: + - The e-series logging components define the specific logs to transfer to the syslog server. + - At the time of writing, 'auditLog' is the only logging component but more may become available. + default: ["auditLog"] + type: list + test: + description: + - This forces a test syslog message to be sent to the stated syslog server. 
+ - Only attempts transmission when I(state=present). + type: bool + default: no + log_path: + description: + - This argument specifies a local path for logging purposes. + type: str + required: no +notes: + - Check mode is supported. + - This API is currently only supported with the Embedded Web Services API v2.12 (bundled with + SANtricity OS 11.40.2) and higher. +""" + +EXAMPLES = """ + - name: Add two syslog server configurations to NetApp E-Series storage array. + netapp_e_syslog: + state: present + address: "{{ item }}" + port: 514 + protocol: tcp + component: "auditLog" + api_url: "10.1.1.1:8443" + api_username: "admin" + api_password: "myPass" + loop: + - "192.168.1.1" + - "192.168.1.100" +""" + +RETURN = """ +msg: + description: Success message + returned: on success + type: str + sample: The settings have been updated. +syslog: + description: + - True if syslog server configuration has been added to e-series storage array. + returned: on success + sample: True + type: bool +""" + +import json +import logging + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request, eseries_host_argument_spec +from ansible.module_utils._text import to_native + +HEADERS = { + "Content-Type": "application/json", + "Accept": "application/json", +} + + +class Syslog(object): + def __init__(self): + argument_spec = eseries_host_argument_spec() + argument_spec.update(dict( + state=dict(choices=["present", "absent"], required=False, default="present"), + address=dict(type="str", required=False), + port=dict(type="int", default=514, required=False), + protocol=dict(choices=["tcp", "tls", "udp"], default="udp", required=False), + components=dict(type="list", required=False, default=["auditLog"]), + test=dict(type="bool", default=False, require=False), + log_path=dict(type="str", required=False), + )) + + required_if = [ + ["state", "present", ["address", "port", "protocol", "components"]], + ] + + mutually_exclusive = [ + ["test", "absent"], + ] + + self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if, + mutually_exclusive=mutually_exclusive) + args = self.module.params + + self.syslog = args["state"] in ["present"] + self.address = args["address"] + self.port = args["port"] + self.protocol = args["protocol"] + self.components = args["components"] + self.test = args["test"] + self.ssid = args["ssid"] + self.url = args["api_url"] + self.creds = dict(url_password=args["api_password"], + validate_certs=args["validate_certs"], + url_username=args["api_username"], ) + + self.components.sort() + + self.check_mode = self.module.check_mode + + # logging setup + log_path = args["log_path"] + self._logger = logging.getLogger(self.__class__.__name__) + if log_path: + logging.basicConfig( + level=logging.DEBUG, filename=log_path, filemode='w', + format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s') + + if not self.url.endswith('/'): + self.url += '/' + + def get_configuration(self): + """Retrieve existing syslog configuration.""" + try: + (rc, result) = request(self.url + "storage-systems/{0}/syslog".format(self.ssid), + headers=HEADERS, **self.creds) + return result + except Exception as err: + self.module.fail_json(msg="Failed to retrieve syslog configuration! Array Id [%s]. Error [%s]." + % (self.ssid, to_native(err))) + + def test_configuration(self, body): + """Send test syslog message to the storage array. 
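+
+        Posts to storage-systems/<ssid>/syslog/<id>/test, so I(body) must already carry the
+        "id" of an existing syslog server entry (it is returned when the entry is created).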
+ + Allows fix number of retries to occur before failure is issued to give the storage array time to create + new syslog server record. + """ + try: + (rc, result) = request(self.url + "storage-systems/{0}/syslog/{1}/test".format(self.ssid, body["id"]), + method='POST', headers=HEADERS, **self.creds) + except Exception as err: + self.module.fail_json( + msg="We failed to send test message! Array Id [{0}]. Error [{1}].".format(self.ssid, to_native(err))) + + def update_configuration(self): + """Post the syslog request to array.""" + config_match = None + perfect_match = None + update = False + body = dict() + + # search existing configuration for syslog server entry match + configs = self.get_configuration() + if self.address: + for config in configs: + if config["serverAddress"] == self.address: + config_match = config + if (config["port"] == self.port and config["protocol"] == self.protocol and + len(config["components"]) == len(self.components) and + all([component["type"] in self.components for component in config["components"]])): + perfect_match = config_match + break + + # generate body for the http request + if self.syslog: + if not perfect_match: + update = True + if config_match: + body.update(dict(id=config_match["id"])) + components = [dict(type=component_type) for component_type in self.components] + body.update(dict(serverAddress=self.address, port=self.port, + protocol=self.protocol, components=components)) + self._logger.info(body) + self.make_configuration_request(body) + + # remove specific syslog server configuration + elif self.address: + update = True + body.update(dict(id=config_match["id"])) + self._logger.info(body) + self.make_configuration_request(body) + + # if no address is specified, remove all syslog server configurations + elif configs: + update = True + for config in configs: + body.update(dict(id=config["id"])) + self._logger.info(body) + self.make_configuration_request(body) + + return update + + def make_configuration_request(self, body): + # make http request(s) + if not self.check_mode: + try: + if self.syslog: + if "id" in body: + (rc, result) = request( + self.url + "storage-systems/{0}/syslog/{1}".format(self.ssid, body["id"]), + method='POST', data=json.dumps(body), headers=HEADERS, **self.creds) + else: + (rc, result) = request(self.url + "storage-systems/{0}/syslog".format(self.ssid), + method='POST', data=json.dumps(body), headers=HEADERS, **self.creds) + body.update(result) + + # send syslog test message + if self.test: + self.test_configuration(body) + + elif "id" in body: + (rc, result) = request(self.url + "storage-systems/{0}/syslog/{1}".format(self.ssid, body["id"]), + method='DELETE', headers=HEADERS, **self.creds) + + # This is going to catch cases like a connection failure + except Exception as err: + self.module.fail_json(msg="We failed to modify syslog configuration! Array Id [%s]. Error [%s]." 
+ % (self.ssid, to_native(err))) + + def update(self): + """Update configuration and respond to ansible.""" + update = self.update_configuration() + self.module.exit_json(msg="The syslog settings have been updated.", changed=update) + + def __call__(self, *args, **kwargs): + self.update() + + +def main(): + settings = Syslog() + settings() + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume.py new file mode 100644 index 000000000..dd388e612 --- /dev/null +++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume.py @@ -0,0 +1,868 @@ +#!/usr/bin/python + +# (c) 2016, NetApp, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['deprecated'], + 'supported_by': 'community'} + +DOCUMENTATION = """ +--- +module: netapp_e_volume +version_added: "2.2" +short_description: NetApp E-Series manage storage volumes (standard and thin) +description: + - Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays. +author: + - Kevin Hulquest (@hulquest) + - Nathan Swartz (@ndswartz) +extends_documentation_fragment: + - netapp_eseries.santricity.santricity.netapp.eseries +options: + state: + description: + - Whether the specified volume should exist + required: true + type: str + choices: ['present', 'absent'] + name: + description: + - The name of the volume to manage. + type: str + required: true + storage_pool_name: + description: + - Required only when requested I(state=='present'). + - Name of the storage pool wherein the volume should reside. + type: str + required: false + size_unit: + description: + - The unit used to interpret the size parameter + choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'] + type: str + default: 'gb' + size: + description: + - Required only when I(state=='present'). + - Size of the volume in I(size_unit). + - Size of the virtual volume in the case of a thin volume in I(size_unit). + - Maximum virtual volume size of a thin provisioned volume is 256tb; however other OS-level restrictions may + exist. + type: float + required: true + segment_size_kb: + description: + - Segment size of the volume + - All values are in kibibytes. + - Some common choices include '8', '16', '32', '64', '128', '256', and '512' but options are system + dependent. + - Retrieve the definitive system list from M(netapp_e_facts) under segment_sizes. + - When the storage pool is a raidDiskPool then the segment size must be 128kb. + - Segment size migrations are not allowed in this module + type: int + default: '128' + thin_provision: + description: + - Whether the volume should be thin provisioned. + - Thin volumes can only be created when I(raid_level=="raidDiskPool"). + - Generally, use of thin-provisioning is not recommended due to performance impacts. + type: bool + default: false + thin_volume_repo_size: + description: + - This value (in size_unit) sets the allocated space for the thin provisioned repository. + - Initial value must between or equal to 4gb and 256gb in increments of 4gb. + - During expansion operations the increase must be between or equal to 4gb and 256gb in increments of 4gb. 
+ - This option has no effect during expansion if I(thin_volume_expansion_policy=="automatic"). + - Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic). + type: int + required: false + thin_volume_max_repo_size: + description: + - This is the maximum amount the thin volume repository will be allowed to grow. + - Only has significance when I(thin_volume_expansion_policy=="automatic"). + - When the percentage I(thin_volume_repo_size) of I(thin_volume_max_repo_size) exceeds + I(thin_volume_growth_alert_threshold) then a warning will be issued and the storage array will execute + the I(thin_volume_expansion_policy) policy. + - Expansion operations when I(thin_volume_expansion_policy=="automatic") will increase the maximum + repository size. + - The default will be the same as size (in size_unit) + type: float + thin_volume_expansion_policy: + description: + - This is the thin volume expansion policy. + - When I(thin_volume_expansion_policy=="automatic") and I(thin_volume_growth_alert_threshold) is exceed the + I(thin_volume_max_repo_size) will be automatically expanded. + - When I(thin_volume_expansion_policy=="manual") and I(thin_volume_growth_alert_threshold) is exceeded the + storage system will wait for manual intervention. + - The thin volume_expansion policy can not be modified on existing thin volumes in this module. + - Generally speaking you should almost always use I(thin_volume_expansion_policy=="automatic). + choices: ["automatic", "manual"] + default: "automatic" + type: str + version_added: 2.8 + thin_volume_growth_alert_threshold: + description: + - This is the thin provision repository utilization threshold (in percent). + - When the percentage of used storage of the maximum repository size exceeds this value then a alert will + be issued and the I(thin_volume_expansion_policy) will be executed. + - Values must be between or equal to 10 and 99. + default: 95 + type: int + version_added: 2.8 + owning_controller: + description: + - Specifies which controller will be the primary owner of the volume + - Not specifying will allow the controller to choose ownership. + required: false + choices: ["A", "B"] + type: str + version_added: 2.9 + ssd_cache_enabled: + description: + - Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache defined) + - The default value is to ignore existing SSD cache setting. + type: bool + default: false + data_assurance_enabled: + description: + - Determines whether data assurance (DA) should be enabled for the volume + - Only available when creating a new volume and on a storage pool with drives supporting the DA capability. + type: bool + default: false + read_cache_enable: + description: + - Indicates whether read caching should be enabled for the volume. + type: bool + default: true + version_added: 2.8 + read_ahead_enable: + description: + - Indicates whether or not automatic cache read-ahead is enabled. + - This option has no effect on thinly provisioned volumes since the architecture for thin volumes cannot + benefit from read ahead caching. + type: bool + default: true + version_added: 2.8 + write_cache_enable: + description: + - Indicates whether write-back caching should be enabled for the volume. + type: bool + default: true + version_added: 2.8 + cache_without_batteries: + description: + - Indicates whether caching should be used without battery backup. 
+      - Warning! If I(cache_without_batteries==true) and the storage system loses power with no battery backup, data will be lost!
+    type: bool
+    default: false
+    version_added: 2.9
+  workload_name:
+    description:
+      - Label for the workload defined by the metadata.
+      - When I(workload_name) and I(metadata) are specified then the defined workload will be added to the storage
+        array.
+      - When I(workload_name) exists on the storage array but the metadata is different then the workload
+        definition will be updated. (Changes will update all associated volumes!)
+      - Existing workloads can be retrieved using M(netapp_e_facts).
+    required: false
+    type: str
+    version_added: 2.8
+  metadata:
+    description:
+      - Dictionary containing metadata for the use, user, location, etc. of the volume (dictionary is arbitrarily
+        defined for whatever the user deems useful)
+      - When I(workload_name) exists on the storage array but the metadata is different then the workload
+        definition will be updated. (Changes will update all associated volumes!)
+      - I(workload_name) must be specified when I(metadata) is defined.
+    type: dict
+    required: false
+    version_added: 2.8
+  wait_for_initialization:
+    description:
+      - Forces the module to wait for expansion operations to complete before continuing.
+    type: bool
+    default: false
+    version_added: 2.8
+  initialization_timeout:
+    description:
+      - Duration in seconds before the wait_for_initialization operation will terminate.
+      - I(wait_for_initialization) must be true for this option to have any effect on the module's operations.
+    type: int
+    required: false
+    version_added: 2.9
+"""
+EXAMPLES = """
+- name: Create simple volume with workload tags (volume meta data)
+  netapp_e_volume:
+    ssid: "{{ ssid }}"
+    api_url: "{{ netapp_api_url }}"
+    api_username: "{{ netapp_api_username }}"
+    api_password: "{{ netapp_api_password }}"
+    validate_certs: "{{ netapp_api_validate_certs }}"
+    state: present
+    name: volume
+    storage_pool_name: storage_pool
+    size: 300
+    size_unit: gb
+    workload_name: volume_tag
+    metadata:
+      key1: value1
+      key2: value2
+- name: Create a thin volume
+  netapp_e_volume:
+    ssid: "{{ ssid }}"
+    api_url: "{{ netapp_api_url }}"
+    api_username: "{{ netapp_api_username }}"
+    api_password: "{{ netapp_api_password }}"
+    validate_certs: "{{ netapp_api_validate_certs }}"
+    state: present
+    name: volume1
+    storage_pool_name: storage_pool
+    size: 131072
+    size_unit: gb
+    thin_provision: true
+    thin_volume_repo_size: 32
+    thin_volume_max_repo_size: 1024
+- name: Expand thin volume's virtual size
+  netapp_e_volume:
+    ssid: "{{ ssid }}"
+    api_url: "{{ netapp_api_url }}"
+    api_username: "{{ netapp_api_username }}"
+    api_password: "{{ netapp_api_password }}"
+    validate_certs: "{{ netapp_api_validate_certs }}"
+    state: present
+    name: volume1
+    storage_pool_name: storage_pool
+    size: 262144
+    size_unit: gb
+    thin_provision: true
+    thin_volume_repo_size: 32
+    thin_volume_max_repo_size: 1024
+- name: Expand thin volume's maximum repository size
+  netapp_e_volume:
+    ssid: "{{ ssid }}"
+    api_url: "{{ netapp_api_url }}"
+    api_username: "{{ netapp_api_username }}"
+    api_password: "{{ netapp_api_password }}"
+    validate_certs: "{{ netapp_api_validate_certs }}"
+    state: present
+    name: volume1
+    storage_pool_name: storage_pool
+    size: 262144
+    size_unit: gb
+    thin_provision: true
+    thin_volume_repo_size: 32
+    thin_volume_max_repo_size: 2048
+- name: Delete volume
+  netapp_e_volume:
+    ssid: "{{ ssid }}"
+    api_url: "{{ netapp_api_url }}"
+    api_username: "{{ netapp_api_username }}"
+    api_password: "{{
netapp_api_password }}" + validate_certs: "{{ netapp_api_validate_certs }}" + state: absent + name: volume +""" +RETURN = """ +msg: + description: State of volume + type: str + returned: always + sample: "Standard volume [workload_vol_1] has been created." +""" +from time import sleep +from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import NetAppESeriesModule +from ansible.module_utils._text import to_native + + +class NetAppESeriesVolume(NetAppESeriesModule): + VOLUME_CREATION_BLOCKING_TIMEOUT_SEC = 300 + + def __init__(self): + ansible_options = dict( + state=dict(required=True, choices=["present", "absent"]), + name=dict(required=True, type="str"), + storage_pool_name=dict(type="str"), + size_unit=dict(default="gb", choices=["bytes", "b", "kb", "mb", "gb", "tb", "pb", "eb", "zb", "yb"], + type="str"), + size=dict(type="float"), + segment_size_kb=dict(type="int", default=128), + owning_controller=dict(required=False, choices=['A', 'B']), + ssd_cache_enabled=dict(type="bool", default=False), + data_assurance_enabled=dict(type="bool", default=False), + thin_provision=dict(type="bool", default=False), + thin_volume_repo_size=dict(type="int"), + thin_volume_max_repo_size=dict(type="float"), + thin_volume_expansion_policy=dict(type="str", choices=["automatic", "manual"], default="automatic"), + thin_volume_growth_alert_threshold=dict(type="int", default=95), + read_cache_enable=dict(type="bool", default=True), + read_ahead_enable=dict(type="bool", default=True), + write_cache_enable=dict(type="bool", default=True), + cache_without_batteries=dict(type="bool", default=False), + workload_name=dict(type="str", required=False), + metadata=dict(type="dict", require=False), + wait_for_initialization=dict(type="bool", default=False), + initialization_timeout=dict(type="int", required=False)) + + required_if = [ + ["state", "present", ["storage_pool_name", "size"]], + ["thin_provision", "true", ["thin_volume_repo_size"]] + ] + + super(NetAppESeriesVolume, self).__init__(ansible_options=ansible_options, + web_services_version="02.00.0000.0000", + supports_check_mode=True, + required_if=required_if) + + args = self.module.params + self.state = args["state"] + self.name = args["name"] + self.storage_pool_name = args["storage_pool_name"] + self.size_unit = args["size_unit"] + self.segment_size_kb = args["segment_size_kb"] + if args["size"]: + self.size_b = self.convert_to_aligned_bytes(args["size"]) + + self.owning_controller_id = None + if args["owning_controller"]: + self.owning_controller_id = "070000000000000000000001" if args["owning_controller"] == "A" else "070000000000000000000002" + + self.read_cache_enable = args["read_cache_enable"] + self.read_ahead_enable = args["read_ahead_enable"] + self.write_cache_enable = args["write_cache_enable"] + self.ssd_cache_enabled = args["ssd_cache_enabled"] + self.cache_without_batteries = args["cache_without_batteries"] + self.data_assurance_enabled = args["data_assurance_enabled"] + + self.thin_provision = args["thin_provision"] + self.thin_volume_expansion_policy = args["thin_volume_expansion_policy"] + self.thin_volume_growth_alert_threshold = int(args["thin_volume_growth_alert_threshold"]) + self.thin_volume_repo_size_b = None + self.thin_volume_max_repo_size_b = None + + if args["thin_volume_repo_size"]: + self.thin_volume_repo_size_b = self.convert_to_aligned_bytes(args["thin_volume_repo_size"]) + if args["thin_volume_max_repo_size"]: + self.thin_volume_max_repo_size_b = 
self.convert_to_aligned_bytes(args["thin_volume_max_repo_size"])
+
+        self.workload_name = args["workload_name"]
+        self.metadata = args["metadata"]
+        self.wait_for_initialization = args["wait_for_initialization"]
+        self.initialization_timeout = args["initialization_timeout"]
+
+        # convert metadata to a list of dictionaries containing the keys "key" and "value" corresponding to
+        # each of the workload attributes dictionary entries
+        metadata = []
+        if self.metadata:
+            if not self.workload_name:
+                self.module.fail_json(msg="When metadata is specified then the name for the workload must be specified."
+                                          " Array [%s]." % self.ssid)
+            for key in self.metadata.keys():
+                metadata.append(dict(key=key, value=self.metadata[key]))
+        self.metadata = metadata
+
+        if self.thin_provision:
+            if not self.thin_volume_max_repo_size_b:
+                self.thin_volume_max_repo_size_b = self.size_b
+
+            if not self.thin_volume_expansion_policy:
+                self.thin_volume_expansion_policy = "automatic"
+
+            if self.size_b > 256 * 1024 ** 4:
+                self.module.fail_json(msg="Thin provisioned volumes must be less than or equal to 256tb in size."
+                                          " Attempted size [%sg]" % (self.size_b / 1024 ** 3))
+
+            if (self.thin_volume_repo_size_b and self.thin_volume_max_repo_size_b and
+                    self.thin_volume_repo_size_b > self.thin_volume_max_repo_size_b):
+                self.module.fail_json(msg="The initial size of the thin volume must not be larger than the maximum"
+                                          " repository size. Array [%s]." % self.ssid)
+
+            if self.thin_volume_growth_alert_threshold < 10 or self.thin_volume_growth_alert_threshold > 99:
+                self.module.fail_json(msg="thin_volume_growth_alert_threshold must be between or equal to 10 and 99."
+                                          " thin_volume_growth_alert_threshold [%s]. Array [%s]."
+                                          % (self.thin_volume_growth_alert_threshold, self.ssid))
+
+        self.volume_detail = None
+        self.pool_detail = None
+        self.workload_id = None
+
+    def convert_to_aligned_bytes(self, size):
+        """Convert size to the truncated byte size that aligns on the segment size."""
+        size_bytes = int(size * self.SIZE_UNIT_MAP[self.size_unit])
+        segment_size_bytes = int(self.segment_size_kb * self.SIZE_UNIT_MAP["kb"])
+        segment_count = int(size_bytes / segment_size_bytes)
+        return segment_count * segment_size_bytes
+
+    def get_volume(self):
+        """Retrieve volume details from storage array."""
+        volumes = list()
+        thin_volumes = list()
+        try:
+            rc, volumes = self.request("storage-systems/%s/volumes" % self.ssid)
+        except Exception as err:
+            self.module.fail_json(msg="Failed to obtain list of thick volumes. Array Id [%s]. Error[%s]."
+                                      % (self.ssid, to_native(err)))
+        try:
+            rc, thin_volumes = self.request("storage-systems/%s/thin-volumes" % self.ssid)
+        except Exception as err:
+            self.module.fail_json(msg="Failed to obtain list of thin volumes. Array Id [%s]. Error[%s]."
+                                      % (self.ssid, to_native(err)))
+
+        volume_detail = [volume for volume in volumes + thin_volumes if volume["name"] == self.name]
+        return volume_detail[0] if volume_detail else dict()
+
+    def wait_for_volume_availability(self, retries=VOLUME_CREATION_BLOCKING_TIMEOUT_SEC / 5):
+        """Waits until volume becomes available.
+
+        :raises AnsibleFailJson when retries are exhausted.
+        """
+        if retries == 0:
+            self.module.fail_json(msg="Timed out waiting for the volume %s to become available. Array [%s]."
+                                      % (self.name, self.ssid))
+        if not self.get_volume():
+            sleep(5)
+            self.wait_for_volume_availability(retries=retries - 1)
+
+    def wait_for_volume_action(self, timeout=None):
+        """Waits until the volume action is complete.
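+
+        Polls storage-systems/<ssid>/symbol/getLongLivedOpsProgress every five seconds and
+        returns once no long-lived operation references this volume any longer.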
+ :param: int timeout: Wait duration measured in seconds. Waits indefinitely when None. + """ + action = "unknown" + percent_complete = None + while action != "complete": + sleep(5) + + try: + rc, operations = self.request("storage-systems/%s/symbol/getLongLivedOpsProgress" % self.ssid) + + # Search long lived operations for volume + action = "complete" + for operation in operations["longLivedOpsProgress"]: + if operation["volAction"] is not None: + for key in operation.keys(): + if (operation[key] is not None and "volumeRef" in operation[key] and + (operation[key]["volumeRef"] == self.volume_detail["id"] or + ("storageVolumeRef" in self.volume_detail and operation[key]["volumeRef"] == self.volume_detail["storageVolumeRef"]))): + action = operation["volAction"] + percent_complete = operation["init"]["percentComplete"] + except Exception as err: + self.module.fail_json(msg="Failed to get volume expansion progress. Volume [%s]. Array Id [%s]." + " Error[%s]." % (self.name, self.ssid, to_native(err))) + + if timeout is not None: + if timeout <= 0: + self.module.warn("Expansion action, %s, failed to complete during the allotted time. Time remaining" + " [%s]. Array Id [%s]." % (action, percent_complete, self.ssid)) + self.module.fail_json(msg="Expansion action failed to complete. Time remaining [%s]. Array Id [%s]." % (percent_complete, self.ssid)) + if timeout: + timeout -= 5 + + self.module.log("Expansion action, %s, is %s complete." % (action, percent_complete)) + self.module.log("Expansion action is complete.") + + def get_storage_pool(self): + """Retrieve storage pool details from the storage array.""" + storage_pools = list() + try: + rc, storage_pools = self.request("storage-systems/%s/storage-pools" % self.ssid) + except Exception as err: + self.module.fail_json(msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]." + % (self.ssid, to_native(err))) + + pool_detail = [storage_pool for storage_pool in storage_pools if storage_pool["name"] == self.storage_pool_name] + return pool_detail[0] if pool_detail else dict() + + def check_storage_pool_sufficiency(self): + """Perform a series of checks as to the sufficiency of the storage pool for the volume.""" + if not self.pool_detail: + self.module.fail_json(msg='Requested storage pool (%s) not found' % self.storage_pool_name) + + if not self.volume_detail: + if self.thin_provision and not self.pool_detail['diskPool']: + self.module.fail_json(msg='Thin provisioned volumes can only be created on raid disk pools.') + + if (self.data_assurance_enabled and not + (self.pool_detail["protectionInformationCapabilities"]["protectionInformationCapable"] and + self.pool_detail["protectionInformationCapabilities"]["protectionType"] == "type2Protection")): + self.module.fail_json(msg="Data Assurance (DA) requires the storage pool to be DA-compatible." + " Array [%s]." % self.ssid) + + if int(self.pool_detail["freeSpace"]) < self.size_b and not self.thin_provision: + self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs." + " Array [%s]." % self.ssid) + else: + # Check for expansion + if (int(self.pool_detail["freeSpace"]) < int(self.volume_detail["totalSizeInBytes"]) - self.size_b and + not self.thin_provision): + self.module.fail_json(msg="Not enough storage pool free space available for the volume's needs." + " Array [%s]." % self.ssid) + + def update_workload_tags(self, check_mode=False): + """Check the status of the workload tag and update storage array definitions if necessary. 
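+
+        As a hypothetical illustration, metadata of {"purpose": "db"} is stored on the array
+        as workloadAttributes [{"key": "purpose", "value": "db"},
+        {"key": "profileId", "value": "ansible_workload_1"}], where the numeric suffix is one
+        greater than the highest existing ansible_workload_* tag.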
+ + When the workload attributes are not provided but an existing workload tag name is, then the attributes will be + used. + + :return bool: Whether changes were required to be made.""" + change_required = False + workload_tags = None + request_body = None + ansible_profile_id = None + + if self.workload_name: + try: + rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid) + except Exception as error: + self.module.fail_json(msg="Failed to retrieve storage array workload tags. Array [%s]" % self.ssid) + + # Generate common indexed Ansible workload tag + current_tag_index_list = [int(pair["value"].replace("ansible_workload_", "")) + for tag in workload_tags for pair in tag["workloadAttributes"] + if pair["key"] == "profileId" and "ansible_workload_" in pair["value"] and + str(pair["value"]).replace("ansible_workload_", "").isdigit()] + + tag_index = 1 + if current_tag_index_list: + tag_index = max(current_tag_index_list) + 1 + + ansible_profile_id = "ansible_workload_%d" % tag_index + request_body = dict(name=self.workload_name, + profileId=ansible_profile_id, + workloadInstanceIndex=None, + isValid=True) + + # evaluate and update storage array when needed + for tag in workload_tags: + if tag["name"] == self.workload_name: + self.workload_id = tag["id"] + + if not self.metadata: + break + + # Determine if core attributes (everything but profileId) is the same + metadata_set = set(tuple(sorted(attr.items())) for attr in self.metadata) + tag_set = set(tuple(sorted(attr.items())) + for attr in tag["workloadAttributes"] if attr["key"] != "profileId") + if metadata_set != tag_set: + self.module.log("Workload tag change is required!") + change_required = True + + # only perform the required action when check_mode==False + if change_required and not check_mode: + self.metadata.append(dict(key="profileId", value=ansible_profile_id)) + request_body.update(dict(isNewWorkloadInstance=False, + isWorkloadDataInitialized=True, + isWorkloadCardDataToBeReset=True, + workloadAttributes=self.metadata)) + try: + rc, resp = self.request("storage-systems/%s/workloads/%s" % (self.ssid, tag["id"]), + data=request_body, method="POST") + except Exception as error: + self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]" + % (self.ssid, to_native(error))) + self.module.log("Workload tag [%s] required change." % self.workload_name) + break + + # existing workload tag not found so create new workload tag + else: + change_required = True + self.module.log("Workload tag creation is required!") + + if change_required and not check_mode: + if self.metadata: + self.metadata.append(dict(key="profileId", value=ansible_profile_id)) + else: + self.metadata = [dict(key="profileId", value=ansible_profile_id)] + + request_body.update(dict(isNewWorkloadInstance=True, + isWorkloadDataInitialized=False, + isWorkloadCardDataToBeReset=False, + workloadAttributes=self.metadata)) + try: + rc, resp = self.request("storage-systems/%s/workloads" % self.ssid, + method="POST", data=request_body) + self.workload_id = resp["id"] + except Exception as error: + self.module.fail_json(msg="Failed to create new workload tag. Array [%s]. Error [%s]" + % (self.ssid, to_native(error))) + self.module.log("Workload tag [%s] was added." % self.workload_name) + + return change_required + + def get_volume_property_changes(self): + """Retrieve the volume update request body when change(s) are required. + + :raise AnsibleFailJson when attempting to change segment size on existing volume. 
+ :return dict: request body when change(s) to a volume's properties are required. + """ + change = False + request_body = dict(flashCache=self.ssd_cache_enabled, metaTags=[], + cacheSettings=dict(readCacheEnable=self.read_cache_enable, + writeCacheEnable=self.write_cache_enable)) + + # check for invalid modifications + if self.segment_size_kb * 1024 != int(self.volume_detail["segmentSize"]): + self.module.fail_json(msg="Existing volume segment size is %s and cannot be modified." + % self.volume_detail["segmentSize"]) + + # common thick/thin volume properties + if (self.read_cache_enable != self.volume_detail["cacheSettings"]["readCacheEnable"] or + self.write_cache_enable != self.volume_detail["cacheSettings"]["writeCacheEnable"] or + self.ssd_cache_enabled != self.volume_detail["flashCached"]): + change = True + + # controller ownership + if self.owning_controller_id and self.owning_controller_id != self.volume_detail["preferredManager"]: + change = True + request_body.update(dict(owningControllerId=self.owning_controller_id)) + + if self.workload_name: + request_body.update(dict(metaTags=[dict(key="workloadId", value=self.workload_id), + dict(key="volumeTypeId", value="volume")])) + if {"key": "workloadId", "value": self.workload_id} not in self.volume_detail["metadata"]: + change = True + elif self.volume_detail["metadata"]: + change = True + + # thick/thin volume specific properties + if self.thin_provision: + if self.thin_volume_growth_alert_threshold != int(self.volume_detail["growthAlertThreshold"]): + change = True + request_body.update(dict(growthAlertThreshold=self.thin_volume_growth_alert_threshold)) + if self.thin_volume_expansion_policy != self.volume_detail["expansionPolicy"]: + change = True + request_body.update(dict(expansionPolicy=self.thin_volume_expansion_policy)) + else: + if self.read_ahead_enable != (int(self.volume_detail["cacheSettings"]["readAheadMultiplier"]) > 0): + change = True + request_body["cacheSettings"].update(dict(readAheadEnable=self.read_ahead_enable)) + if self.cache_without_batteries != self.volume_detail["cacheSettings"]["cwob"]: + change = True + request_body["cacheSettings"].update(dict(cacheWithoutBatteries=self.cache_without_batteries)) + + return request_body if change else dict() + + def get_expand_volume_changes(self): + """Expand the storage specifications for the existing thick/thin volume. + + :raise AnsibleFailJson when a thick/thin volume expansion request fails. + :return dict: dictionary containing all the necessary values for volume expansion request + """ + request_body = dict() + + if self.size_b < int(self.volume_detail["capacity"]): + self.module.fail_json(msg="Reducing the size of volumes is not permitted. Volume [%s]. 
Array [%s]."
+                                      % (self.name, self.ssid))
+
+        if self.volume_detail["thinProvisioned"]:
+            if self.size_b > int(self.volume_detail["capacity"]):
+                request_body.update(dict(sizeUnit="bytes", newVirtualSize=self.size_b))
+                self.module.log("Thin volume virtual size has been expanded.")
+
+            if self.volume_detail["expansionPolicy"] == "automatic":
+                if self.thin_volume_max_repo_size_b > int(self.volume_detail["provisionedCapacityQuota"]):
+                    request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_max_repo_size_b))
+                    self.module.log("Thin volume maximum repository size has been expanded (automatic policy).")
+
+            elif self.volume_detail["expansionPolicy"] == "manual":
+                if self.thin_volume_repo_size_b > int(self.volume_detail["currentProvisionedCapacity"]):
+                    change = self.thin_volume_repo_size_b - int(self.volume_detail["currentProvisionedCapacity"])
+                    if change < 4 * 1024 ** 3 or change > 256 * 1024 ** 3 or change % (4 * 1024 ** 3) != 0:
+                        self.module.fail_json(msg="The thin volume repository increase must be between 4gb and 256gb"
+                                                  " (inclusive) in increments of 4gb. Attempted increase [%sg]."
+                                                  % (change // (1024 ** 3)))
+
+                    request_body.update(dict(sizeUnit="bytes", newRepositorySize=self.thin_volume_repo_size_b))
+                    self.module.log("Thin volume maximum repository size has been expanded (manual policy).")
+
+        elif self.size_b > int(self.volume_detail["capacity"]):
+            request_body.update(dict(sizeUnit="bytes", expansionSize=self.size_b))
+            self.module.log("Volume storage capacity has been expanded.")
+
+        return request_body
+
+    def create_volume(self):
+        """Create thick/thin volume according to the specified criteria."""
+        body = dict(name=self.name, poolId=self.pool_detail["id"], sizeUnit="bytes",
+                    dataAssuranceEnabled=self.data_assurance_enabled)
+
+        if self.thin_provision:
+            body.update(dict(virtualSize=self.size_b,
+                             repositorySize=self.thin_volume_repo_size_b,
+                             maximumRepositorySize=self.thin_volume_max_repo_size_b,
+                             expansionPolicy=self.thin_volume_expansion_policy,
+                             growthAlertThreshold=self.thin_volume_growth_alert_threshold))
+            try:
+                rc, volume = self.request("storage-systems/%s/thin-volumes" % self.ssid, data=body, method="POST")
+            except Exception as error:
+                self.module.fail_json(msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
+                                          % (self.name, self.ssid, to_native(error)))
+
+            self.module.log("New thin volume created [%s]." % self.name)
+
+        else:
+            body.update(dict(size=self.size_b, segSize=self.segment_size_kb))
+            try:
+                rc, volume = self.request("storage-systems/%s/volumes" % self.ssid, data=body, method="POST")
+            except Exception as error:
+                self.module.fail_json(msg="Failed to create volume. Volume [%s]. Array Id [%s]. Error[%s]."
+                                          % (self.name, self.ssid, to_native(error)))
+
+            self.module.log("New volume created [%s]." % self.name)
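+    # Worked example for the manual-policy check above (figures are illustrative): growing a thin
+    # volume repository from 32 GiB to 100 GiB is a 68 GiB increase; 68 GiB falls inside the
+    # 4-256 GiB window and is a multiple of 4 GiB, so the request is accepted. A target of
+    # 102 GiB would be rejected because its 70 GiB delta is not a multiple of 4 GiB.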
+
+    def update_volume_properties(self):
+        """Update existing thin-volume or volume properties.
+
+        :raise AnsibleFailJson when either thick/thin volume update request fails.
+        :return bool: whether update was applied
+        """
+        self.wait_for_volume_availability()
+        self.volume_detail = self.get_volume()
+
+        request_body = self.get_volume_property_changes()
+
+        if request_body:
+            if self.thin_provision:
+                try:
+                    rc, resp = self.request("storage-systems/%s/thin-volumes/%s"
+                                            % (self.ssid, self.volume_detail["id"]), data=request_body, method="POST")
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to update thin volume properties. Volume [%s]. Array Id [%s]."
+                                              " Error[%s]." % (self.name, self.ssid, to_native(error)))
+            else:
+                try:
+                    rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]),
+                                            data=request_body, method="POST")
+                except Exception as error:
+                    self.module.fail_json(msg="Failed to update volume properties. Volume [%s]. Array Id [%s]."
+                                              " Error[%s]." % (self.name, self.ssid, to_native(error)))
+            return True
+        return False
+
+    def expand_volume(self):
+        """Expand the storage specifications for the existing thick/thin volume.
+
+        :raise AnsibleFailJson when a thick/thin volume expansion request fails.
+        """
+        request_body = self.get_expand_volume_changes()
+        if request_body:
+            if self.volume_detail["thinProvisioned"]:
+                try:
+                    rc, resp = self.request("storage-systems/%s/thin-volumes/%s/expand"
+                                            % (self.ssid, self.volume_detail["id"]), data=request_body, method="POST")
+                except Exception as err:
+                    self.module.fail_json(msg="Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
+                                              % (self.name, self.ssid, to_native(err)))
+                self.module.log("Thin volume specifications have been expanded.")
+
+            else:
+                try:
+                    rc, resp = self.request(
+                        "storage-systems/%s/volumes/%s/expand" % (self.ssid, self.volume_detail['id']),
+                        data=request_body, method="POST")
+                except Exception as err:
+                    self.module.fail_json(msg="Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s]."
+                                              % (self.name, self.ssid, to_native(err)))
+
+                self.module.log("Volume storage capacity has been expanded.")
+
+    def delete_volume(self):
+        """Delete existing thin/thick volume."""
+        if self.thin_provision:
+            try:
+                rc, resp = self.request("storage-systems/%s/thin-volumes/%s" % (self.ssid, self.volume_detail["id"]),
+                                        method="DELETE")
+            except Exception as error:
+                self.module.fail_json(msg="Failed to delete thin volume. Volume [%s]. Array Id [%s]. Error[%s]."
+                                          % (self.name, self.ssid, to_native(error)))
+            self.module.log("Thin volume deleted [%s]." % self.name)
+        else:
+            try:
+                rc, resp = self.request("storage-systems/%s/volumes/%s" % (self.ssid, self.volume_detail["id"]),
+                                        method="DELETE")
+            except Exception as error:
+                self.module.fail_json(msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]."
+                                          % (self.name, self.ssid, to_native(error)))
+            self.module.log("Volume deleted [%s]." % self.name)
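+    # apply() below is the module's entry point and follows the usual detect-then-apply pattern:
+    # the get_* helpers are first used read-only to decide whether a change is needed (so check
+    # mode stays accurate), and only then are the create/update/expand/delete requests issued.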
+
+    def apply(self):
+        """Determine and apply any changes necessary to satisfy the specified criteria.
+
+        :raise AnsibleExitJson when completes successfully"""
+        change = False
+        msg = None
+
+        self.volume_detail = self.get_volume()
+        self.pool_detail = self.get_storage_pool()
+
+        # Determine whether changes need to be applied to existing workload tags
+        if self.state == 'present' and self.update_workload_tags(check_mode=True):
+            change = True
+
+        # Determine if any changes need to be applied
+        if self.volume_detail:
+            if self.state == 'absent':
+                change = True
+
+            elif self.state == 'present':
+                if self.get_expand_volume_changes() or self.get_volume_property_changes():
+                    change = True
+
+        elif self.state == 'present':
+            if self.thin_provision and (self.thin_volume_repo_size_b < 4 * 1024 ** 3 or
+                                        self.thin_volume_repo_size_b > 256 * 1024 ** 3 or
+                                        self.thin_volume_repo_size_b % (4 * 1024 ** 3) != 0):
+                self.module.fail_json(msg="The initial thin volume repository size must be between 4gb and 256gb in"
+                                          " increments of 4gb. Attempted size [%sg]."
+                                          % (self.thin_volume_repo_size_b // (1024 ** 3)))
+            change = True
+
+        self.module.log("Update required: [%s]." % change)
+
+        # Apply any necessary changes
+        if change and not self.module.check_mode:
+            if self.state == 'present':
+                if self.update_workload_tags():
+                    msg = "Workload tag change occurred."
+
+                if not self.volume_detail:
+                    self.check_storage_pool_sufficiency()
+                    self.create_volume()
+                    self.update_volume_properties()
+                    msg = msg[:-1] + " and volume [%s] was created." if msg else "Volume [%s] has been created."
+                else:
+                    if self.update_volume_properties():
+                        msg = "Volume [%s] properties were updated."
+
+                    if self.get_expand_volume_changes():
+                        self.expand_volume()
+                        msg = msg[:-1] + " and was expanded." if msg else "Volume [%s] was expanded."
+
+                if self.wait_for_initialization:
+                    self.module.log("Waiting for volume operation to complete.")
+                    self.wait_for_volume_action(timeout=self.initialization_timeout)
+
+            elif self.state == 'absent':
+                self.delete_volume()
+                msg = "Volume [%s] has been deleted."
+
+        else:
+            msg = "Volume [%s] does not exist." if self.state == 'absent' else "Volume [%s] exists."
+
+        self.module.exit_json(msg=(msg % self.name if msg and "%s" in msg else msg), changed=change)
+
+
+def main():
+    volume = NetAppESeriesVolume()
+    volume.apply()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume_copy.py b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume_copy.py
new file mode 100644
index 000000000..a6748a54c
--- /dev/null
+++ b/ansible_collections/netapp_eseries/santricity/plugins/modules/netapp_e_volume_copy.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+
+# (c) 2016, NetApp, Inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+                    'status': ['deprecated'],
+                    'supported_by': 'community'}
+
+DOCUMENTATION = """
+---
+module: netapp_e_volume_copy
+short_description: NetApp E-Series create volume copy pairs
+description:
+    - Create and delete volume copy pairs on NetApp E-series storage arrays.
+version_added: '2.2'
+author: Kevin Hulquest (@hulquest)
+extends_documentation_fragment:
+    - netapp_eseries.santricity.santricity.netapp.eseries
+options:
+    ssid:
+        description:
+            - Storage system identifier
+        type: str
+        default: '1'
+    api_username:
+        required: true
+        description:
+            - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+        type: str
+    api_password:
+        required: true
+        description:
+            - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
+        type: str
+    api_url:
+        required: true
+        description:
+            - The url to the SANtricity WebServices Proxy or embedded REST API, for example C(https://prod-1.wahoo.acme.com/devmgr/v2).
+        type: str
+    validate_certs:
+        required: false
+        default: true
+        type: bool
+        description:
+            - Should https certificates be validated?
+    source_volume_id:
+        description:
+            - The id of the volume copy source.
+            - If used, must be paired with destination_volume_id
+            - Mutually exclusive with volume_copy_pair_id, and search_volume_id
+        type: str
+    destination_volume_id:
+        description:
+            - The id of the volume copy destination.
+            - If used, must be paired with source_volume_id
+            - Mutually exclusive with volume_copy_pair_id, and search_volume_id
+        type: str
+    volume_copy_pair_id:
+        description:
+            - The id of a given volume copy pair
+            - Mutually exclusive with destination_volume_id, source_volume_id, and search_volume_id
+            - Can be used to delete or check the presence of volume copy pairs
+            - Must specify this or (destination_volume_id and source_volume_id)
+        type: str
+    state:
+        description:
+            - Whether the specified volume copy pair should exist or not.
+        required: True
+        choices: ['present', 'absent']
+        type: str
+    create_copy_pair_if_does_not_exist:
+        description:
+            - Defines if a copy pair will be created if it does not exist.
+            - If set to True destination_volume_id and source_volume_id are required.
+        type: bool
+        default: True
+    start_stop_copy:
+        description:
+            - Starts a re-copy or stops a copy in progress
+            - "Note: If you stop the initial copy before it is done, the copy pair will be destroyed."
+            - Requires volume_copy_pair_id
+        type: str
+        choices: ['start', 'stop']
+    search_volume_id:
+        description:
+            - Searches for all valid potential target and source volumes that could be used in a copy_pair
+            - Mutually exclusive with volume_copy_pair_id, destination_volume_id and source_volume_id
+        type: str
+    copy_priority:
+        description:
+            - Copy priority level
+        required: False
+        default: 0
+        type: int
+    onlineCopy:
+        description:
+            - Whether copy should be online
+        required: False
+        default: False
+        type: bool
+    targetWriteProtected:
+        description:
+            - Whether target should be write protected
+        required: False
+        default: True
+        type: bool
+"""
+EXAMPLES = """
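+# Example tasks assembled from the documented options; urls, credentials, and volume ids are
+# illustrative placeholders only.
+    - name: Create a volume copy pair
+      netapp_e_volume_copy:
+        api_url: "https://10.1.1.1:8443/devmgr/v2"
+        api_username: "admin"
+        api_password: "myPass"
+        state: present
+        source_volume_id: "02000000600A098000A4B28D00000FDE"
+        destination_volume_id: "02000000600A098000A4B9D100000FDF"
+
+    - name: Remove a volume copy pair by pair id
+      netapp_e_volume_copy:
+        api_url: "https://10.1.1.1:8443/devmgr/v2"
+        api_username: "admin"
+        api_password: "myPass"
+        state: absent
+        volume_copy_pair_id: "88000000600A098000A4B28D00001000"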
+"""
+RETURN = """
+msg:
+    description: Success message
+    returned: success
+    type: str
+    sample: Created Volume Copy Pair with ID
+"""
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils._text import to_native
+from ansible_collections.netapp_eseries.santricity.plugins.module_utils.netapp import request
+
+HEADERS = {
+    "Content-Type": "application/json",
+    "Accept": "application/json",
+}
+
+
+def find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(params):
+    get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
+    url = params['api_url'] + get_status
+
+    (rc, resp) = request(url, method='GET', url_username=params['api_username'],
+                         url_password=params['api_password'], headers=HEADERS,
+                         validate_certs=params['validate_certs'])
+
+    volume_copy_pair_id = None
+    for potential_copy_pair in resp:
+        # A pair matches only when both its source and its target volumes match the request
+        if (potential_copy_pair['sourceVolume'] == params['source_volume_id'] and
+                potential_copy_pair['targetVolume'] == params['destination_volume_id']):
+            volume_copy_pair_id = potential_copy_pair['id']
+
+    return volume_copy_pair_id
+
+
+def create_copy_pair(params):
+    get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
+    url = params['api_url'] + get_status
+
+    rData = {
+        "sourceId": params['source_volume_id'],
+        "targetId": params['destination_volume_id']
+    }
+
+    (rc, resp) = request(url, data=json.dumps(rData), ignore_errors=True, method='POST',
+                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+                         validate_certs=params['validate_certs'])
+    if rc != 200:
+        return False, (rc, resp)
+    else:
+        return True, (rc, resp)
+
+
+def delete_copy_pair_by_copy_pair_id(params):
+    get_status = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
+        params['ssid'], params['volume_copy_pair_id'])
+    url = params['api_url'] + get_status
+
+    (rc, resp) = request(url, ignore_errors=True, method='DELETE',
+                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+                         validate_certs=params['validate_certs'])
+    if rc != 204:
+        return False, (rc, resp)
+    else:
+        return True, (rc, resp)
+
+
+def find_volume_copy_pair_id_by_volume_copy_pair_id(params):
+    # This lookup only needs to verify that the pair exists, so a simple GET is sufficient
+    get_status = 'storage-systems/%s/volume-copy-jobs/%s' % (
+        params['ssid'], params['volume_copy_pair_id'])
+    url = params['api_url'] + get_status
+
+    (rc, resp) = request(url, ignore_errors=True, method='GET',
+                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
+                         validate_certs=params['validate_certs'])
+    if rc != 200:
+        return False, (rc, resp)
+    else:
+        return True, (rc, resp)
+
+
+def start_stop_copy(params):
+    get_status = 'storage-systems/%s/volume-copy-jobs-control/%s?control=%s' % (
+        params['ssid'], params['volume_copy_pair_id'], params['start_stop_copy'])
+    url = params['api_url'] + get_status
+
+    (response_code, response_data) = request(url, ignore_errors=True, method='POST',
+                                             url_username=params['api_username'], url_password=params['api_password'],
+                                             headers=HEADERS,
+                                             validate_certs=params['validate_certs'])
+
+    if response_code == 200:
+        return True, response_data[0]['percentComplete']
+    else:
+        return False, response_data
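+# Both control operations above drive the same endpoint and differ only in the ?control= query
+# argument, e.g. (pair id is a placeholder):
+#   POST storage-systems/1/volume-copy-jobs-control/880000...?control=start
+#   POST storage-systems/1/volume-copy-jobs-control/880000...?control=stop
+# On a 200 response, percentComplete is read from the first element of the returned job list.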
+def check_copy_status(params):
+    get_status = 'storage-systems/%s/volume-copy-jobs-control/%s' % (
+        params['ssid'], params['volume_copy_pair_id'])
+    url = params['api_url'] + get_status
+
+    (response_code, response_data) = request(url, ignore_errors=True, method='GET',
+                                             url_username=params['api_username'], url_password=params['api_password'],
+                                             headers=HEADERS,
+                                             validate_certs=params['validate_certs'])
+
+    if response_code == 200:
+        if response_data['percentComplete'] != -1:
+            return True, response_data['percentComplete']
+        else:
+            return False, response_data['percentComplete']
+    else:
+        return False, response_data
+
+
+def find_valid_copy_pair_targets_and_sources(params):
+    get_status = 'storage-systems/%s/volumes' % params['ssid']
+    url = params['api_url'] + get_status
+
+    (response_code, response_data) = request(url, ignore_errors=True, method='GET',
+                                             url_username=params['api_username'], url_password=params['api_password'],
+                                             headers=HEADERS,
+                                             validate_certs=params['validate_certs'])
+
+    if response_code == 200:
+        source_capacity = None
+        candidates = []
+        for volume in response_data:
+            if volume['id'] == params['search_volume_id']:
+                source_capacity = volume['capacity']
+            else:
+                candidates.append(volume)
+
+        potential_sources = []
+        potential_targets = []
+
+        for volume in candidates:
+            # Only volumes not already participating in a copy pair qualify; those strictly larger
+            # than the search volume are potential targets, the rest are potential sources.
+            if volume['volumeCopyTarget'] is False and volume['volumeCopySource'] is False:
+                if volume['capacity'] > source_capacity:
+                    potential_targets.append(volume['id'])
+                else:
+                    potential_sources.append(volume['id'])
+
+        return potential_targets, potential_sources
+
+    else:
+        raise Exception("Response [%s]" % response_code)
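+# Hypothetical walk-through of the search above: given a 100 GiB search volume and unpaired
+# volumes of 50 GiB, 100 GiB, and 200 GiB, only the 200 GiB volume is reported as a valid target
+# (the capacity comparison is strictly greater), while the 50 GiB and 100 GiB volumes are
+# reported as valid sources.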
+
+
+def main():
+    module = AnsibleModule(argument_spec=dict(
+        source_volume_id=dict(type='str'),
+        destination_volume_id=dict(type='str'),
+        copy_priority=dict(required=False, default=0, type='int'),
+        ssid=dict(type='str', default='1'),
+        api_url=dict(required=True),
+        api_username=dict(required=True),
+        api_password=dict(required=True, no_log=True),
+        validate_certs=dict(required=False, default=True, type='bool'),
+        targetWriteProtected=dict(required=False, default=True, type='bool'),
+        onlineCopy=dict(required=False, default=False, type='bool'),
+        volume_copy_pair_id=dict(type='str'),
+        state=dict(required=True, choices=['present', 'absent'], type='str'),
+        create_copy_pair_if_does_not_exist=dict(required=False, default=True, type='bool'),
+        start_stop_copy=dict(required=False, choices=['start', 'stop'], type='str'),
+        search_volume_id=dict(type='str'),
+    ),
+        mutually_exclusive=[['volume_copy_pair_id', 'destination_volume_id'],
+                            ['volume_copy_pair_id', 'source_volume_id'],
+                            ['volume_copy_pair_id', 'search_volume_id'],
+                            ['search_volume_id', 'destination_volume_id'],
+                            ['search_volume_id', 'source_volume_id'],
+                            ],
+        required_together=[['source_volume_id', 'destination_volume_id'],
+                           ],
+        required_if=[["create_copy_pair_if_does_not_exist", True, ['source_volume_id', 'destination_volume_id'], ],
+                     ["start_stop_copy", 'stop', ['volume_copy_pair_id'], ],
+                     ["start_stop_copy", 'start', ['volume_copy_pair_id'], ],
+                     ]
+    )
+    params = module.params
+
+    if not params['api_url'].endswith('/'):
+        params['api_url'] += '/'
+
+    # Check if we want to search
+    if params['search_volume_id'] is not None:
+        try:
+            potential_targets, potential_sources = find_valid_copy_pair_targets_and_sources(params)
+        except Exception as e:
+            module.fail_json(msg="Failed to find valid copy pair candidates. Error [%s]" % to_native(e))
+
+        module.exit_json(changed=False,
+                         msg='Valid source devices found: %s. Valid target devices found: %s.' % (len(potential_sources), len(potential_targets)),
+                         search_volume_id=params['search_volume_id'],
+                         valid_targets=potential_targets,
+                         valid_sources=potential_sources)
+
+    # Check if we want to start or stop a copy operation
+    if params['start_stop_copy'] == 'start' or params['start_stop_copy'] == 'stop':
+
+        # Get the current status info
+        currently_running, status_info = check_copy_status(params)
+
+        # If we want to start
+        if params['start_stop_copy'] == 'start':
+
+            # If we have already started
+            if currently_running is True:
+                module.exit_json(changed=False, msg='Volume Copy Pair copy has started.',
+                                 volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=status_info)
+            # If we need to start
+            else:
+                start_status, info = start_stop_copy(params)
+
+                if start_status is True:
+                    module.exit_json(changed=True, msg='Volume Copy Pair copy has started.',
+                                     volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=info)
+                else:
+                    module.fail_json(msg="Could not start volume copy pair. Error: %s" % info)
+
+        # If we want to stop
+        else:
+            # If it has already stopped
+            if currently_running is False:
+                module.exit_json(changed=False, msg='Volume Copy Pair copy is stopped.',
+                                 volume_copy_pair_id=params['volume_copy_pair_id'])
+
+            # If we need to stop it
+            else:
+                start_status, info = start_stop_copy(params)
+
+                if start_status is True:
+                    module.exit_json(changed=True, msg='Volume Copy Pair copy has been stopped.',
+                                     volume_copy_pair_id=params['volume_copy_pair_id'])
+                else:
+                    module.fail_json(msg="Could not stop volume copy pair. Error: %s" % info)
+    # If we want the copy pair to exist we do this stuff
+    if params['state'] == 'present':
+
+        # We need to check if it exists first
+        if params['volume_copy_pair_id'] is None:
+            params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
+                params)
+
+        # If no volume copy pair is found we need to make it.
+        if params['volume_copy_pair_id'] is None:
+
+            # In order to create we can not do so with just a volume_copy_pair_id
+            copy_began_status, (rc, resp) = create_copy_pair(params)
+
+            if copy_began_status is True:
+                module.exit_json(changed=True, msg='Created Volume Copy Pair with ID: %s' % resp['id'])
+            else:
+                module.fail_json(msg="Could not create volume copy pair. Code: %s Error: %s" % (rc, resp))
+
+        # If it does exist we do nothing
+        else:
+            # We verify that it exists
+            exist_status, (exist_status_code, exist_status_data) = find_volume_copy_pair_id_by_volume_copy_pair_id(
+                params)
+
+            if exist_status:
+                module.exit_json(changed=False,
+                                 msg='Volume Copy Pair with ID: %s exists' % params['volume_copy_pair_id'])
+            else:
+                if exist_status_code == 404:
+                    module.fail_json(
+                        msg='Volume Copy Pair with ID: %s does not exist. Cannot create without source_volume_id'
+                            ' and destination_volume_id' % params['volume_copy_pair_id'])
+                else:
+                    module.fail_json(msg="Could not find volume copy pair. Code: %s Error: %s" % (
+                        exist_status_code, exist_status_data))
+
+    # If we want it to not exist we do this
+    else:
+
+        if params['volume_copy_pair_id'] is None:
+            params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
+                params)
+
+        # We delete it by the volume_copy_pair_id
+        delete_status, (delete_status_code, delete_status_data) = delete_copy_pair_by_copy_pair_id(params)
+
+        if delete_status is True:
+            module.exit_json(changed=True,
+                             msg='Volume Copy Pair with ID: %s was deleted' % params['volume_copy_pair_id'])
+        else:
+            if delete_status_code == 404:
+                module.exit_json(changed=False,
+                                 msg='Volume Copy Pair with ID: %s does not exist' % params['volume_copy_pair_id'])
+            else:
+                module.fail_json(msg="Could not delete volume copy pair. Code: %s Error: %s" % (
+                    delete_status_code, delete_status_data))
+
+
+if __name__ == '__main__':
+    main()