author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-06-05 16:18:41 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-06-05 16:18:41 +0000
commit    b643c52cf29ce5bbab738b43290af3556efa1ca9 (patch)
tree      21d5c53d7a9b696627a255777cefdf6f78968824 /ansible_collections/dellemc/openmanage/plugins
parent    Releasing progress-linux version 9.5.1+dfsg-1~progress7.99u1. (diff)
Merging upstream version 10.0.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/dellemc/openmanage/plugins')
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/README.md                                         |   3
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py                     |   7
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/module_utils/session_utils.py                     | 322
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py                             |  23
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py           |   5
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_diagnostics.py                      | 874
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py                            | 515
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py            |  21
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_session.py                          | 425
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_storage_volume.py                   | 924
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py    |  13
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py  |  14
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py                |  39
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py                            |  18
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py                 | 198
15 files changed, 3232 insertions, 169 deletions
diff --git a/ansible_collections/dellemc/openmanage/plugins/README.md b/ansible_collections/dellemc/openmanage/plugins/README.md
index 7711a1d84..3864a2bb8 100644
--- a/ansible_collections/dellemc/openmanage/plugins/README.md
+++ b/ansible_collections/dellemc/openmanage/plugins/README.md
@@ -28,6 +28,7 @@ Here are the list of modules and module_utils supported by Dell.
├── idrac_bios.py
├── idrac_boot.py
├── idrac_certificates.py
+ ├── idrac_diagnostics.py
├── idrac_firmware.py
├── idrac_firmware_info.py
├── idrac_license.py
@@ -41,6 +42,8 @@ Here are the list of modules and module_utils supported by Dell.
├── idrac_redfish_storage_controller.py
├── idrac_reset.py
├── idrac_server_config_profile.py
+ ├── idrac_session.py
+ ├── idrac_storage_volume.py
├── idrac_syslog.py
├── idrac_system_info.py
├── idrac_timezone_ntp.py
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py
index b2b2240d0..cdd94c3cd 100644
--- a/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
# Dell OpenManage Ansible Modules
-# Version 7.1.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 9.1.0
+# Copyright (C) 2019-2024 Dell Inc. or its subsidiaries. All Rights Reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
@@ -30,6 +30,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.module_utils.common.parameters import env_fallback
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import compress_ipv6
try:
from omsdk.sdkinfra import sdkinfra
from omsdk.sdkcreds import UserCredentials
@@ -56,7 +57,7 @@ class iDRACConnection:
def __init__(self, module_params):
if not HAS_OMSDK:
raise ImportError("Dell OMSDK library is required for this module")
- self.idrac_ip = module_params['idrac_ip']
+ self.idrac_ip = compress_ipv6(module_params['idrac_ip'])
self.idrac_user = module_params['idrac_user']
self.idrac_pwd = module_params['idrac_password']
self.idrac_port = module_params['idrac_port']
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/session_utils.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/session_utils.py
new file mode 100644
index 000000000..4bead057a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/session_utils.py
@@ -0,0 +1,322 @@
+# -*- coding: utf-8 -*-
+
+# Dell OpenManage Ansible Modules
+# Version 9.2.0
+# Copyright (C) 2024 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import config_ipv6
+
+HEADER_TYPE = "application/json"
+
+
+class OpenURLResponse():
+ """
+ HTTP response handler class.
+ """
+ def __init__(self, resp):
+ """
+ Initializes a new instance of the class.
+
+ Args:
+ resp (Response): The response object to read the body from.
+
+ Initializes the following instance variables:
+ - body (bytes): The body of the response, or None if the response is None.
+ - resp (Response): The response object.
+
+ If the response is not None, the body is set to the content of the response.
+ """
+ self.body = None
+ self.resp = resp
+ if self.resp:
+ self.body = self.resp.read()
+
+ @property
+ def json_data(self):
+ """
+ Returns the JSON data parsed from the `body` attribute of the object.
+
+ :return: The parsed JSON data.
+ :raises ValueError: If the `body` attribute cannot be parsed as JSON.
+ """
+ try:
+ return json.loads(self.body)
+ except ValueError as exc:
+ raise ValueError("Unable to parse json") from exc
+
+ @property
+ def status_code(self):
+ """
+ Get the status code of the response.
+
+ Returns:
+ int: The status code of the response.
+ """
+ return self.resp.getcode()
+
+ @property
+ def success(self):
+ """
+ Returns a boolean indicating whether the status code of the response is within the range
+ of 200-299.
+
+ :return: True if the status code is within the range of 200-299, False otherwise.
+ :rtype: bool
+ """
+ status = self.status_code
+        return 200 <= status <= 299
+
+ @property
+ def headers(self):
+ """
+ Returns the headers of the response object.
+
+ :return: A dictionary containing the headers of the response object.
+ :rtype: dict
+ """
+ return self.resp.headers
+
+ @property
+ def reason(self):
+ """
+ Get the reason for the response.
+
+ Returns:
+ str: The reason for the response.
+ """
+ return self.resp.reason
+
+
+class SessionAPI():
+ """
+ Main class for session operations.
+ """
+ def __init__(self, module_params):
+ """
+ Initializes the object with the given module parameters.
+
+ Args:
+ module_params (dict): A dictionary containing the module parameters.
+ - "hostname" (str): The IP address or hostname of the target system.
+ - "username" (str): The username for authentication.
+ - "password" (str): The password for authentication.
+ - "port" (int, optional): The port number. Defaults to None.
+ - "validate_certs" (bool, optional): Whether to validate SSL certificates. Defaults
+ to False.
+ - "ca_path" (str, optional): The path to the CA certificate file. Defaults to None.
+ - "timeout" (int, optional): The timeout value in seconds. Defaults to None.
+ - "use_proxy" (bool, optional): Whether to use a proxy. Defaults to True.
+
+ Returns:
+ None
+ """
+ self.ipaddress = module_params.get("hostname")
+ self.username = module_params.get("username")
+ self.password = module_params.get("password")
+ self.port = module_params.get("port")
+ self.validate_certs = module_params.get("validate_certs", False)
+ self.ca_path = module_params.get("ca_path")
+ self.timeout = module_params.get("timeout")
+ self.use_proxy = module_params.get("use_proxy", True)
+ self.protocol = 'https'
+ self.ipaddress = config_ipv6(self.ipaddress)
+ self.set_headers(module_params)
+
+ def set_headers(self, module_params):
+ """
+ Set the headers for the HTTP request based on the module parameters.
+
+ Parameters:
+ module_params (dict): The module parameters containing the state and auth_token.
+
+ Returns:
+ None
+
+ This function sets the headers for the HTTP request based on the state parameter in the
+ module_params.
+ If the state is "present", the headers will include 'Content-Type' and 'Accept' with values
+ 'application/json'.
+ If the state is not "present", the headers will include 'Content-Type', 'Accept', and
+ 'X-Auth-Token' with the value from the auth_token parameter in module_params.
+ """
+ if module_params.get("state") == "present":
+ self._headers = {
+ 'Content-Type': HEADER_TYPE,
+ 'Accept': HEADER_TYPE
+ }
+ else:
+ self._headers = {
+ 'Content-Type': HEADER_TYPE,
+ 'Accept': HEADER_TYPE,
+ 'X-Auth-Token': module_params.get("auth_token")
+ }
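+        # For example, deleting a session supplies {"state": "absent",
+        # "auth_token": "<token>"} and gets the X-Auth-Token header; a "present"
+        # request omits the token, which only exists once a session is created.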
+
+ def _get_url(self, uri):
+ """
+ Generate the full URL by combining the protocol, IP address, port, and URI.
+
+ Parameters:
+ uri (str): The URI to be appended to the URL.
+
+ Returns:
+ str: The full URL generated by combining the protocol, IP address, port, and URI.
+ """
+ return f"{self.protocol}://{self.ipaddress}:{self.port}{uri}"
+
+ def _build_url(self, path, query_param=None):
+ """
+ Builds a URL by concatenating the base URI with the given path and query parameters.
+
+ Args:
+ path (str): The path component of the URL.
+ query_param (dict, optional): A dictionary of query parameters to be appended to the
+ URL. Defaults to None.
+
+ Returns:
+ str: The fully constructed URL.
+
+ Raises:
+ None
+
+ Examples:
+            >>> session = SessionAPI(module_params)
+            >>> session._build_url("/api/endpoint", {"param1": "value1", "param2": "value2"})
+            "https://<hostname>:<port>/api/endpoint?param1=value1&param2=value2"
+ """
+        url = self._get_url(path) if path else path
+ if query_param:
+ url += f"?{urlencode(query_param)}"
+ return url
+
+ def _url_common_args_spec(self, method, api_timeout, headers=None):
+ """
+ Generates the common arguments for a URL request.
+
+ Args:
+ method (str): The HTTP method for the request.
+ api_timeout (int, optional): The timeout for the API request. If None, the default
+ timeout is used.
+ headers (dict, optional): Additional headers to include in the request.
+
+ Returns:
+ dict: A dictionary containing the common arguments for the URL request. The dictionary
+ has the following keys:
+ - method (str): The HTTP method for the request.
+ - validate_certs (bool): Whether to validate the SSL certificates.
+ - ca_path (str): The path to the CA certificate bundle.
+ - use_proxy (bool): Whether to use a proxy for the request.
+ - headers (dict): The headers to include in the request.
+ - timeout (int): The timeout for the request.
+ - follow_redirects (str): The policy for following redirects.
+
+ """
+ req_header = self._headers
+ if headers:
+ req_header.update(headers)
+ if api_timeout is None:
+ api_timeout = self.timeout
+ if self.ca_path is None:
+ self.ca_path = self._get_omam_ca_env()
+ url_kwargs = {
+ "method": method,
+ "validate_certs": self.validate_certs,
+ "ca_path": self.ca_path,
+ "use_proxy": self.use_proxy,
+ "headers": req_header,
+ "timeout": api_timeout,
+ "follow_redirects": 'all',
+ }
+ return url_kwargs
+
+    def _args_session(self, method, api_timeout, headers=None):
+        """
+        Returns a dictionary containing the arguments needed to establish a session.
+
+        :param method: A string representing the HTTP method to be used.
+        :param api_timeout: An integer representing the timeout for the API request.
+        :param headers: An optional dictionary containing additional headers to be included in the
+            request.
+        :return: A dictionary containing the arguments needed to establish a session, including the
+            URL arguments, headers, and API timeout.
+        """
+        # _url_common_args_spec() already merges any extra headers into the
+        # request headers, so they are not merged separately here.
+        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
+        return url_kwargs
+
+ def invoke_request(self, uri, method, data=None, query_param=None, headers=None,
+ api_timeout=None, dump=True):
+ """
+ Invokes a request to the specified URI using the given method and optional parameters.
+
+ :param uri: The URI to send the request to.
+ :type uri: str
+ :param method: The HTTP method to use for the request.
+ :type method: str
+ :param data: The data to send with the request (default: None).
+ :type data: dict or None
+ :param query_param: The query parameters to include in the request URL (default: None).
+ :type query_param: dict or None
+ :param headers: The headers to include in the request (default: None).
+ :type headers: dict or None
+ :param api_timeout: The timeout for the request in seconds (default: None).
+ :type api_timeout: int or None
+ :param dump: Whether to dump the data to JSON before sending the request (default: True).
+ :type dump: bool
+ :return: The response data from the request.
+ :rtype: OpenURLResponse
+ """
+ url_kwargs = self._args_session(method, api_timeout, headers=headers)
+ if data and dump:
+ data = json.dumps(data)
+ url = self._build_url(uri, query_param=query_param)
+ resp = open_url(url, data=data, **url_kwargs)
+ resp_data = OpenURLResponse(resp)
+ return resp_data
+
+ def _get_omam_ca_env(self):
+ """
+ Returns the value of the environment variable REQUESTS_CA_BUNDLE, or if it is not set,
+ the value of the environment variable CURL_CA_BUNDLE, or if that is not set,
+ the value of the environment variable OMAM_CA_BUNDLE.
+
+ :return: The value of the environment variable, or None if none of the variables are set.
+ :rtype: str or None
+ """
+ return (os.environ.get("REQUESTS_CA_BUNDLE") or
+ os.environ.get("CURL_CA_BUNDLE") or
+ os.environ.get("OMAM_CA_BUNDLE"))
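
Illustrative usage sketch (not part of the patch) of the new SessionAPI class;
the hostname, credentials, and URI below are placeholders:

    from ansible_collections.dellemc.openmanage.plugins.module_utils.session_utils import SessionAPI

    params = {
        "hostname": "192.168.0.1",
        "username": "admin",
        "password": "secret",
        "port": 443,
        "state": "present",
    }
    session = SessionAPI(params)
    # _build_url() joins protocol, host, port, path, and the encoded query string.
    print(session._build_url("/redfish/v1/SessionService/Sessions",
                             query_param={"$skip": 0}))
    # https://192.168.0.1:443/redfish/v1/SessionService/Sessions?%24skip=0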
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py
index 3d8abfbe5..b838197e0 100644
--- a/ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
# Dell OpenManage Ansible Modules
-# Version 8.2.0
-# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 9.1.0
+# Copyright (C) 2022-2024 Dell Inc. or its subsidiaries. All Rights Reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
@@ -85,6 +85,7 @@ def config_ipv6(hostname):
if ']:' in ip_addr:
ip_addr, port = ip_addr.split(']:')
ip_addr = ip_addr.strip('[]')
+ ip_addr = compress_ipv6(ip_addr)
if port is None or port == "":
hostname = "[{0}]".format(ip_addr)
else:
@@ -92,6 +93,20 @@ def config_ipv6(hostname):
return hostname
+def compress_ipv6(ipv6_long):
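+    # Strips leading zeros from each group, lower-cases it, and collapses the
+    # first run of zero groups to "::", e.g.
+    # "2001:0DB8:0000:0000:0000:0000:0000:0001" -> "2001:db8::1".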
+ groups = ipv6_long.split(':')
+ temp = []
+ for group in groups:
+ group = re.sub(r'^0+', '', group)
+ group = group.lower()
+ if 0 == len(group):
+ group = '0'
+ temp.append(group)
+ tempstr = ':'.join(temp)
+ ipv6_short = re.sub(r'(:0)+', ':', tempstr, 1)
+ return ipv6_short
+
+
def job_tracking(rest_obj, job_uri, max_job_wait_sec=600, job_state_var=('LastRunStatus', 'Id'),
job_complete_states=(2060, 2020, 2090), job_fail_states=(2070, 2101, 2102, 2103),
job_running_states=(2050, 2040, 2030, 2100),
@@ -493,12 +508,14 @@ def get_current_time(redfish_obj):
return curr_time, date_offset
-def xml_data_conversion(attr_dict, fqdd=None):
+def xml_data_conversion(attr_dict, fqdd=None, custom_payload_to_add=None):
component = """<Component FQDD="{0}">{1}</Component>"""
attr = ""
for k, v in attr_dict.items():
key = re.sub(r"\.(?!\d)", "#", k)
attr += '<Attribute Name="{0}">{1}</Attribute>'.format(key, v)
+ if custom_payload_to_add:
+ attr += custom_payload_to_add
root = component.format(fqdd, attr)
return root
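
A quick behavior sketch (not part of the patch) for the new compress_ipv6
helper; the addresses are illustrative:

    from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import compress_ipv6

    print(compress_ipv6("2001:0DB8:0000:0000:0000:0000:0000:0001"))
    # 2001:db8::1
    print(compress_ipv6("fe80:0:0:0:abcd:0:0:1"))
    # fe80::abcd:0:0:1  (only the first run of zero groups is collapsed)

This is the normalization that dellemc_idrac.py now applies to idrac_ip before
handing it to OMSDK, and that config_ipv6() applies to the address part of
bracketed "[address]:port" values.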
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py
index e8021db18..050859c6d 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py
@@ -18,6 +18,11 @@ DOCUMENTATION = r'''
module: dellemc_idrac_storage_volume
short_description: Configures the RAID configuration attributes
version_added: "2.0.0"
+deprecated:
+ removed_at_date: "2026-03-31"
+ why: Replaced with M(dellemc.openmanage.idrac_storage_volume).
+ alternative: Use M(dellemc.openmanage.idrac_storage_volume) instead.
+ removed_from_collection: dellemc.openmanage
description:
- This module is responsible for configuring the RAID attributes.
extends_documentation_fragment:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_diagnostics.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_diagnostics.py
new file mode 100644
index 000000000..3df5a68fd
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_diagnostics.py
@@ -0,0 +1,874 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 9.0.0
+# Copyright (C) 2024 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: idrac_diagnostics
+short_description: Run and export iDRAC diagnostics
+version_added: "9.0.0"
+description:
+ - This module allows you to run and export diagnostics on iDRAC.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ run:
+ description:
+ - Run the diagnostics job on iDRAC.
+      - Run the diagnostics job based on the I(run_mode) and save the report in the internal storage. I(reboot_type) is applicable when this option is C(true).
+ type: bool
+ export:
+ description:
+ - Exports the diagnostics information to the given share.
+ - This operation requires I(share_parameters).
+      - When I(run) is C(true) and I(job_wait) is C(false), only the run diagnostics job is triggered, and I(export) is ignored.
+ type: bool
+ run_mode:
+ description:
+ - This option provides the choices to run the diagnostics.
+ - C(express) The express diagnostics runs a test package for each server subsystem. However,
+ it does not run the complete set of tests available in the package for each subsystem.
+      - C(extended) The extended diagnostics runs all available tests in each test package for all subsystems.
+ - C(long_run) The long-run diagnostics runs express and extended tests.
+ type: str
+ choices: [express, extended, long_run]
+ default: express
+ reboot_type:
+ description:
+ - This option provides the choice to reboot the host immediately to run the diagnostics.
+ - This is applicable when I(run) is C(true).
+      - C(force) Forced graceful shutdown signals the operating system to turn off and waits for ten minutes.
+        If the operating system does not turn off, the iDRAC power cycles the system.
+      - C(graceful) Graceful shutdown waits for the operating system to turn off and for the system to restart.
+ - C(power_cycle) performs a power cycle for a hard reset on the device.
+ type: str
+ choices: [force, graceful, power_cycle]
+ default: graceful
+ scheduled_start_time:
+ description:
+ - Schedules the job at the specified time.
+ - The accepted formats are yyyymmddhhmmss and YYYY-MM-DDThh:mm:ss+HH:MM.
+      - This is applicable when I(run) is C(true) and I(reboot_type) is C(power_cycle).
+ type: str
+ scheduled_end_time:
+ description:
+ - Run the diagnostic until the specified end date and end time after the I(scheduled_start_time).
+ - The accepted formats are yyyymmddhhmmss and YYYY-MM-DDThh:mm:ss+HH:MM.
+ - If the run operation does not complete before the specified end time, then the operation fails.
+      - This is applicable when I(run) is C(true) and I(reboot_type) is C(power_cycle).
+ type: str
+ job_wait:
+ description:
+ - Provides the option to wait for job completion.
+ - This is applicable when I(run) is C(true) and I(reboot_type) is C(power_cycle).
+ - This is applicable only to run the diagnostics job.
+ type: bool
+ default: true
+ job_wait_timeout:
+ description:
+ - Time in seconds to wait for job completion.
+ - This is applicable when I(job_wait) is C(true).
+ type: int
+ default: 1200
+ share_parameters:
+ description:
+ - Parameters that are required for the export operation of diagnostics.
+ - I(share_parameters) is required when I(export) is C(true).
+ type: dict
+ suboptions:
+ share_type:
+ description:
+ - Share type of the network share.
+ - C(local) uses local path for I(export) operation.
+ - C(nfs) uses NFS share for I(export) operation.
+ - C(cifs) uses CIFS share for I(export) operation.
+ - C(http) uses HTTP share for I(export) operation.
+ - C(https) uses HTTPS share for I(export) operation.
+ type: str
+ choices: [local, nfs, cifs, http, https]
+ default: local
+ file_name:
+ description:
+ - Diagnostics file name for I(export) operation.
+ type: str
+ ip_address:
+ description:
+ - IP address of the network share.
+ - I(ip_address) is required when I(share_type) is C(nfs), C(cifs), C(http) or C(https).
+ type: str
+ share_name:
+ description:
+ - Network share or local path of the diagnostics file.
+ type: str
+ workgroup:
+ description:
+ - Workgroup of the network share.
+ - I(workgroup) is applicable only when I(share_type) is C(cifs).
+ type: str
+ username:
+ description:
+ - Username of the network share.
+ - I(username) is required when I(share_type) is C(cifs).
+ type: str
+ password:
+ description:
+ - Password of the network share.
+ - I(password) is required when I(share_type) is C(cifs).
+ type: str
+ ignore_certificate_warning:
+ description:
+          - Ignores the certificate warning while connecting to the share and is only applicable when I(share_type) is C(https).
+ - C(off) ignores the certificate warning.
+ - C(on) does not ignore the certificate warning.
+ type: str
+ choices: ["off", "on"]
+ default: "off"
+ proxy_support:
+ description:
+ - Specifies if proxy support must be used or not.
+ - C(off) does not use proxy settings.
+ - C(default_proxy) uses the default proxy settings.
+ - C(parameters_proxy) uses the specified proxy settings. I(proxy_server) is required when I(proxy_support) is C(parameters_proxy).
+ - I(proxy_support) is only applicable when I(share_type) is C(http) or C(https).
+ type: str
+ choices: ["off", "default_proxy", "parameters_proxy"]
+ default: "off"
+ proxy_type:
+ description:
+ - The proxy type of the proxy server.
+ - C(http) to select HTTP proxy.
+ - C(socks) to select SOCKS proxy.
+ - I(proxy_type) is only applicable when I(share_type) is C(http) or C(https) and when I(proxy_support) is C(parameters_proxy).
+ type: str
+ choices: [http, socks]
+ default: http
+ proxy_server:
+ description:
+ - The IP address of the proxy server.
+ - I(proxy_server) is required when I(proxy_support) is C(parameters_proxy).
+ - I(proxy_server) is only applicable when I(share_type) is C(http) or C(https) and when I(proxy_support) is C(parameters_proxy).
+ type: str
+ proxy_port:
+ description:
+ - The port of the proxy server.
+ - I(proxy_port) is only applicable when I(share_type) is C(http) or C(https) and when I(proxy_support) is C(parameters_proxy).
+ type: int
+ default: 80
+ proxy_username:
+ description:
+ - The username of the proxy server.
+ - I(proxy_username) is only applicable when I(share_type) is C(http) or C(https) and when I(proxy_support) is C(parameters_proxy).
+ type: str
+ proxy_password:
+ description:
+ - The password of the proxy server.
+ - I(proxy_password) is only applicable when I(share_type) is C(http) or C(https) and when I(proxy_support) is C(parameters_proxy).
+ type: str
+ resource_id:
+ type: str
+ description:
+ - Id of the resource.
+ - If the value for resource ID is not provided, the module picks the first resource ID available from the list of system resources returned by the iDRAC.
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Shivam Sharma(@ShivamSh3)"
+notes:
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports only iDRAC9 and above.
+ - This module supports IPv4 and IPv6 addresses.
+ - This module supports C(check_mode).
+ - This module requires 'Dell Diagnostics' firmware package to be present on the server.
+ - When I(share_type) is C(local) for I(export) operation, job_details are not displayed.
+"""
+
+EXAMPLES = r"""
+---
+- name: Run and export the diagnostics to local path
+ dellemc.openmanage.idrac_diagnostics:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "path/to/ca_file"
+ run: true
+ export: true
+ share_parameters:
+ share_type: "local"
+      share_name: "/opt/local/diagnostics/"
+ file_name: "diagnostics.txt"
+
+- name: Run the diagnostics with power cycle reboot on schedule
+ dellemc.openmanage.idrac_diagnostics:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "path/to/ca_file"
+ run: true
+ run_mode: "express"
+ reboot_type: "power_cycle"
+ scheduled_start_time: 20240101101015
+
+- name: Run and export the diagnostics to HTTPS share
+ dellemc.openmanage.idrac_diagnostics:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "path/to/ca_file"
+ run: true
+ export: true
+ share_parameters:
+      share_type: "https"
+ ignore_certificate_warning: "on"
+ share_name: "/share_path/diagnostics_collection_path"
+ ip_address: "192.168.0.2"
+ file_name: "diagnostics.txt"
+
+- name: Run and export the diagnostics to NFS share
+ dellemc.openmanage.idrac_diagnostics:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "path/to/ca_file"
+ run: true
+ export: true
+ share_parameters:
+      share_type: "nfs"
+ share_name: "nfsshare/diagnostics_collection_path/"
+ ip_address: "192.168.0.3"
+ file_name: "diagnostics.txt"
+
+- name: Export the diagnostics to CIFS share
+ dellemc.openmanage.idrac_diagnostics:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "path/to/ca_file"
+ export: true
+ share_parameters:
+      share_type: "cifs"
+ share_name: "/cifsshare/diagnostics_collection_path/"
+ ip_address: "192.168.0.4"
+ file_name: "diagnostics.txt"
+
+- name: Export the diagnostics to HTTPS share via proxy
+ dellemc.openmanage.idrac_diagnostics:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "path/to/ca_file"
+ export: true
+ share_parameters:
+      share_type: "https"
+ share_name: "/share_path/diagnostics_collection_path"
+ ignore_certificate_warning: "on"
+ ip_address: "192.168.0.2"
+ file_name: "diagnostics.txt"
+ proxy_support: parameters_proxy
+ proxy_type: http
+ proxy_server: "192.168.0.5"
+ proxy_port: 1080
+ proxy_username: "proxy_user"
+ proxy_password: "proxy_password"
+"""
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Status of the diagnostics operation.
+ returned: always
+ sample: "Successfully ran and exported the diagnostics."
+job_details:
+ description: Returns the output for status of the job.
+ returned: For run and export operations
+ type: dict
+ sample: {
+ "ActualRunningStartTime": "2024-01-10T10:14:31",
+ "ActualRunningStopTime": "2024-01-10T10:26:34",
+ "CompletionTime": "2024-01-10T10:26:34",
+ "Description": "Job Instance",
+ "EndTime": "2024-01-10T10:30:15",
+ "Id": "JID_XXXXXXXXXXXX",
+ "JobState": "Completed",
+ "JobType": "RemoteDiagnostics",
+ "Message": "Job completed successfully.",
+ "MessageArgs": [],
+ "MessageArgs@odata.count": 0,
+ "MessageId": "SYS018",
+ "Name": "Remote Diagnostics",
+ "PercentComplete": 100,
+ "StartTime": "2024-01-10T10:12:15",
+ "TargetSettingsURI": null
+ }
+diagnostics_file_path:
+ description: Returns the full path of the diagnostics file.
+ returned: For export operation
+ type: str
+ sample: "/share_path/diagnostics_collection_path/diagnostics.txt"
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.12.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "A Remote Diagnostic (ePSA) job already exists.",
+ "MessageArgs": [],
+ "MessageArgs@odata.count": 0,
+ "MessageId": "IDRAC.2.9.SYS098",
+ "RelatedProperties": [],
+ "RelatedProperties@odata.count": 0,
+ "Resolution": "A response action is not required if the scheduled start time of the existing Remote Diagnostic (ePSA) job is ok.
+ Else, delete the existing Diagnostics (ePSA) job and recreate another with an appropriate start time.",
+ "Severity": "Informational"
+ }
+ ]
+ }
+ }
+'''
+
+import json
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import (
+ config_ipv6, get_current_time, get_dynamic_uri, validate_and_get_first_resource_id_uri, remove_key, idrac_redfish_job_tracking)
+from datetime import datetime
+
+MANAGERS_URI = "/redfish/v1/Managers"
+
+OEM = "Oem"
+MANUFACTURER = "Dell"
+JOBS = "Jobs"
+JOBS_EXPAND = "?$expand=*($levels=1)"
+LC_SERVICE = "DellLCService"
+ACTIONS = "Actions"
+EXPORT = "#DellLCService.ExportePSADiagnosticsResult"
+RUN = "#DellLCService.RunePSADiagnostics"
+TEST_SHARE = "#DellLCService.TestNetworkShare"
+ODATA_REGEX = "(.*?)@odata"
+ODATA = "@odata.id"
+MESSAGE_EXTENDED_INFO = "@Message.ExtendedInfo"
+TIME_FORMAT_FILE = "%Y%m%d_%H%M%S"
+TIME_FORMAT_WITHOUT_OFFSET = "%Y%m%d%H%M%S"
+TIME_FORMAT_WITH_OFFSET = "%Y-%m-%dT%H:%M:%S%z"
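+# Example values: "20240110_101431" (export file names), "20240110101415"
+# (scheduled start/end times), "2024-01-10T10:14:15-06:00" (times with offset).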
+SUCCESS_EXPORT_MSG = "Successfully exported the diagnostics."
+SUCCESS_RUN_MSG = "Successfully ran the diagnostics operation."
+SUCCESS_RUN_AND_EXPORT_MSG = "Successfully ran and exported the diagnostics."
+RUNNING_RUN_MSG = "Successfully triggered the job to run diagnostics."
+ALREADY_RUN_MSG = "The diagnostics job is already present."
+INVALID_DIRECTORY_MSG = "Provided directory path '{path}' is not valid."
+NO_OPERATION_SKIP_MSG = "The operation is skipped."
+INSUFFICIENT_DIRECTORY_PERMISSION_MSG = "Provided directory path '{path}' is not writable. " \
+                                        "Please check if the directory has appropriate permissions."
+UNSUPPORTED_FIRMWARE_MSG = "iDRAC firmware version is not supported."
+TIMEOUT_NEGATIVE_OR_ZERO_MSG = "The parameter `job_wait_timeout` value cannot be negative or zero."
+WAIT_TIMEOUT_MSG = "The job is not complete after {0} seconds."
+START_TIME = "The specified scheduled time occurs in the past. " \
+             "Provide a future time to schedule the job."
+INVALID_TIME = "The specified date and time `{0}` to schedule the diagnostics is not valid. Enter a valid date and time."
+END_START_TIME = "The end time `{0}` to schedule the diagnostics must be greater than the start time `{1}`."
+CHANGES_FOUND_MSG = "Changes found to be applied."
+NO_FILE = "The diagnostics file does not exist."
+
+PROXY_SUPPORT = {"off": "Off", "default_proxy": "DefaultProxy", "parameters_proxy": "ParametersProxy"}
+STATUS_SUCCESS = [200, 202]
+
+
+class Diagnostics:
+
+ def __init__(self, idrac, module):
+ self.idrac = idrac
+ self.module = module
+ self.diagnostics_file_path = None
+ self.run_url = None
+ self.export_url = None
+ self.share_name = None
+ self.file_name = None
+
+ def execute(self):
+ # To be overridden by the subclasses
+ pass
+
+ def get_payload_details(self):
+ payload = {}
+ payload["ShareType"] = self.module.params.get('share_parameters').get('share_type').upper()
+ payload["IPAddress"] = self.module.params.get('share_parameters').get('ip_address')
+ payload["ShareName"] = self.module.params.get('share_parameters').get('share_name')
+ payload["UserName"] = self.module.params.get('share_parameters').get('username')
+ payload["Password"] = self.module.params.get('share_parameters').get('password')
+ payload["FileName"] = self.module.params.get('share_parameters').get('file_name')
+ payload["IgnoreCertWarning"] = self.module.params.get('share_parameters').get('ignore_certificate_warning').capitalize()
+ if self.module.params.get('share_parameters').get('proxy_support') == "parameters_proxy":
+ payload["ProxySupport"] = PROXY_SUPPORT[self.module.params.get('share_parameters').get('proxy_support')]
+ payload["ProxyType"] = self.module.params.get('share_parameters').get('proxy_type').upper()
+ payload["ProxyServer"] = self.module.params.get('share_parameters').get('proxy_server')
+ payload["ProxyPort"] = str(self.module.params.get('share_parameters').get('proxy_port'))
+ if self.module.params.get('share_parameters').get('proxy_username') and self.module.params.get('share_parameters').get('proxy_password'):
+ payload["ProxyUname"] = self.module.params.get('share_parameters').get('proxy_username')
+ payload["ProxyPasswd"] = self.module.params.get('share_parameters').get('proxy_password')
+ return payload
+
+ def test_network_share(self):
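+        # For a LOCAL share this only checks that the directory exists and is
+        # writable; for remote shares it invokes the iDRAC TestNetworkShare
+        # action and reports any HTTP error message back to the user.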
+ payload = self.get_payload_details()
+ del payload["FileName"]
+ payload = {key: value for key, value in payload.items() if value is not None}
+ if payload.get("ShareType") == "LOCAL":
+ path = payload.get("ShareName")
+ if not (os.path.exists(path)):
+ self.module.exit_json(msg=INVALID_DIRECTORY_MSG.format(path=path), failed=True)
+ if not os.access(path, os.W_OK):
+ self.module.exit_json(msg=INSUFFICIENT_DIRECTORY_PERMISSION_MSG.format(path=path), failed=True)
+ else:
+ try:
+ test_url = self.get_test_network_share_url()
+ self.idrac.invoke_request(test_url, "POST", data=payload)
+ except HTTPError as err:
+ filter_err = remove_key(json.load(err), regex_pattern=ODATA_REGEX)
+ message_details = filter_err.get('error').get(MESSAGE_EXTENDED_INFO)[0]
+ message = message_details.get('Message')
+ self.module.exit_json(msg=message, error_info=filter_err, failed=True)
+
+ def get_test_network_share_url(self):
+ uri, error_msg = validate_and_get_first_resource_id_uri(
+ self.module, self.idrac, MANAGERS_URI)
+ if error_msg:
+ self.module.exit_json(msg=error_msg, failed=True)
+ resp = get_dynamic_uri(self.idrac, uri)
+ url = resp.get('Links', {}).get(OEM, {}).get(MANUFACTURER, {}).get(LC_SERVICE, {}).get(ODATA, {})
+ action_resp = get_dynamic_uri(self.idrac, url)
+ url = action_resp.get(ACTIONS, {}).get(TEST_SHARE, {}).get('target', {})
+ return url
+
+
+class RunDiagnostics(Diagnostics):
+
+ def execute(self):
+ msg, job_details = None, None
+ if self.module.params.get('export'):
+ self.test_network_share()
+ self.__get_run_diagnostics_url()
+ self.check_diagnostics_jobs()
+ self.__validate_job_timeout()
+ run_diagnostics_status = self.__run_diagnostics()
+ job_status = self.__perform_job_wait(run_diagnostics_status)
+ status = run_diagnostics_status.status_code
+ if status in STATUS_SUCCESS and job_status.get('JobState') == "Completed":
+ msg = SUCCESS_RUN_MSG
+ job_details = job_status
+ if status in STATUS_SUCCESS and job_status.get('JobState') in ["Scheduled", "Scheduling", "Running", "New"]:
+ msg = RUNNING_RUN_MSG
+ job_details = job_status
+ return msg, job_details, None
+
+ def __run_diagnostics(self):
+ reboot_job_types = {
+ "graceful": "GracefulRebootWithoutForcedShutdown",
+ "force": "GracefulRebootWithForcedShutdown",
+ "power_cycle": "PowerCycle"
+ }
+ run_modes = {
+ "express": "Express",
+ "extended": "Extended",
+ "long_run": "ExpressAndExtended"
+ }
+ payload = {}
+ reboot_type = self.module.params.get('reboot_type')
+ run_mode = self.module.params.get('run_mode')
+ if reboot_type == "power_cycle":
+ if self.module.params.get('scheduled_start_time'):
+ start_time = self.__validate_time_format(self.module.params.get('scheduled_start_time'))
+ if self.__validate_time(start_time):
+ payload["ScheduledStartTime"] = start_time
+ if self.module.params.get('scheduled_end_time'):
+ end_time = self.__validate_time_format(self.module.params.get('scheduled_end_time'))
+ if self.__validate_time(end_time):
+ payload["UntilTime"] = end_time
+ if (self.module.params.get('scheduled_start_time') and self.module.params.get('scheduled_end_time')
+ and self.__validate_end_time(start_time, end_time)):
+ payload["UntilTime"] = end_time
+ payload["RebootJobType"] = reboot_job_types.get(reboot_type)
+ payload["RunMode"] = run_modes.get(run_mode)
+ run_diagnostics_status = self.idrac.invoke_request(self.run_url, "POST", data=payload)
+ return run_diagnostics_status
+
+ def __get_run_diagnostics_url(self):
+ uri, error_msg = validate_and_get_first_resource_id_uri(
+ self.module, self.idrac, MANAGERS_URI)
+ if error_msg:
+ self.module.exit_json(msg=error_msg, failed=True)
+ resp = get_dynamic_uri(self.idrac, uri)
+ url = resp.get('Links', {}).get(OEM, {}).get(MANUFACTURER, {}).get(LC_SERVICE, {}).get(ODATA, {})
+ if url:
+ action_resp = get_dynamic_uri(self.idrac, url)
+ run_url = action_resp.get(ACTIONS, {}).get(RUN, {}).get('target', {})
+ self.run_url = run_url
+ else:
+ self.module.exit_json(msg=UNSUPPORTED_FIRMWARE_MSG, failed=True)
+
+ def __validate_job_timeout(self):
+ if self.module.params.get("job_wait") and self.module.params.get("job_wait_timeout") <= 0:
+ self.module.exit_json(msg=TIMEOUT_NEGATIVE_OR_ZERO_MSG, failed=True)
+
+ def __perform_job_wait(self, run_diagnostics_status):
+ job_dict = {}
+ job_wait = self.module.params.get('job_wait')
+ job_wait_timeout = self.module.params.get('job_wait_timeout')
+ job_tracking_uri = run_diagnostics_status.headers.get("Location")
+ if job_tracking_uri:
+ job_id = job_tracking_uri.split("/")[-1]
+ res_uri = validate_and_get_first_resource_id_uri(self.module, self.idrac, MANAGERS_URI)
+ job_uri = f"{res_uri[0]}/{OEM}/{MANUFACTURER}/{JOBS}/{job_id}"
+ if job_wait:
+ job_failed, msg, job_dict, wait_time = idrac_redfish_job_tracking(self.idrac, job_uri,
+ max_job_wait_sec=job_wait_timeout,
+ sleep_interval_secs=1)
+ job_dict = remove_key(job_dict, regex_pattern=ODATA_REGEX)
+ if int(wait_time) >= int(job_wait_timeout):
+ self.module.exit_json(msg=WAIT_TIMEOUT_MSG.format(
+ job_wait_timeout), changed=True, job_status=job_dict)
+ if job_failed:
+ self.module.exit_json(
+ msg=job_dict.get("Message"), job_status=job_dict, failed=True)
+ else:
+ job_resp = self.idrac.invoke_request(job_uri, 'GET')
+ job_dict = job_resp.json_data
+ job_dict = remove_key(job_dict, regex_pattern=ODATA_REGEX)
+ return job_dict
+
+ def __validate_time_format(self, time):
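+        # Accepts "YYYY-MM-DDThh:mm:ss+HH:MM" or "yyyymmddhhmmss" and normalizes
+        # to the latter, e.g. "2024-01-10T10:14:31+05:30" -> "20240110101431".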
+ try:
+ datetime_obj = datetime.strptime(time, TIME_FORMAT_WITH_OFFSET)
+ except ValueError:
+ try:
+ datetime_obj = datetime.strptime(time, TIME_FORMAT_WITHOUT_OFFSET)
+ except ValueError:
+ self.module.exit_json(failed=True, msg=INVALID_TIME.format(time))
+ formatted_time = datetime_obj.strftime(TIME_FORMAT_WITHOUT_OFFSET)
+ return formatted_time
+
+ def __validate_time(self, time):
+ curr_idrac_time, offset = get_current_time(self.idrac)
+ curr_idrac_time = datetime.strptime(curr_idrac_time, TIME_FORMAT_WITH_OFFSET)
+ curr_idrac_time = curr_idrac_time.strftime(TIME_FORMAT_WITHOUT_OFFSET)
+ currtime_obj = datetime.strptime(curr_idrac_time, TIME_FORMAT_WITHOUT_OFFSET)
+ starttime_obj = datetime.strptime(time, TIME_FORMAT_WITHOUT_OFFSET)
+ if starttime_obj < currtime_obj:
+ self.module.exit_json(failed=True, msg=START_TIME)
+ return True
+
+ def __validate_end_time(self, start_time, end_time):
+ starttime_obj = datetime.strptime(start_time, TIME_FORMAT_WITHOUT_OFFSET)
+ endtime_obj = datetime.strptime(end_time, TIME_FORMAT_WITHOUT_OFFSET)
+ if starttime_obj > endtime_obj:
+ self.module.exit_json(failed=True, msg=END_START_TIME.format(end_time, start_time))
+ return True
+
+ def check_diagnostics_jobs(self):
+ res_uri = validate_and_get_first_resource_id_uri(self.module, self.idrac, MANAGERS_URI)
+ job_uri = f"{res_uri[0]}/{OEM}/{MANUFACTURER}/{JOBS}{JOBS_EXPAND}"
+ job_resp = self.idrac.invoke_request(job_uri, "GET")
+ job_list = job_resp.json_data.get('Members', [])
+ job_id = ""
+ for jb in job_list:
+ if jb.get("JobType") == "RemoteDiagnostics" and jb.get("JobState") in ["Scheduled", "Running", "Starting", "New"]:
+ job_id = jb['Id']
+ job_dict = remove_key(jb, regex_pattern=ODATA_REGEX)
+ break
+ if self.module.check_mode and job_id:
+ self.module.exit_json(msg=ALREADY_RUN_MSG, job_details=job_dict, skipped=True)
+ if self.module.check_mode and not job_id:
+ self.module.exit_json(msg=CHANGES_FOUND_MSG, changed=True)
+ if job_id:
+ self.module.exit_json(msg=ALREADY_RUN_MSG, job_details=job_dict, skipped=True)
+
+
+class ExportDiagnostics(Diagnostics):
+
+ def execute(self):
+ self.test_network_share()
+ self.__get_export_diagnostics_url()
+ if self.module.check_mode:
+ self.perform_check_mode()
+ job_status = {}
+ share_type = self.module.params.get('share_parameters').get('share_type')
+ share_type_methods = {
+ "local": self.__export_diagnostics_local,
+ "http": self.__export_diagnostics_http,
+ "https": self.__export_diagnostics_http,
+ "cifs": self.__export_diagnostics_cifs,
+ "nfs": self.__export_diagnostics_nfs
+ }
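+        # A local export writes the file from this module itself; the network
+        # share types delegate the file copy to an iDRAC job and then poll it.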
+ export_diagnostics_status = share_type_methods[share_type]()
+ if share_type != "local":
+ job_status = self.get_job_status(export_diagnostics_status)
+ status = export_diagnostics_status.status_code
+ diagnostics_file_path = f"{self.share_name}/{self.file_name}"
+ if status in STATUS_SUCCESS:
+ msg = SUCCESS_EXPORT_MSG
+ job_details = job_status
+ return msg, job_details, diagnostics_file_path
+
+ def __export_diagnostics_local(self):
+ payload = {}
+ payload["ShareType"] = "Local"
+ file_path = self.module.params.get('share_parameters').get('share_name')
+ self.share_name = file_path.rstrip("/")
+ diagnostics_status = self.__export_diagnostics(payload)
+ diagnostics_file_name = payload.get("FileName")
+ diagnostics_data = self.idrac.invoke_request(diagnostics_status.headers.get("Location"), "GET")
+ file_name = os.path.join(file_path, diagnostics_file_name)
+ with open(file_name, "w") as fp:
+ fp.write(diagnostics_data.body.decode().replace("\r", ""))
+ return diagnostics_status
+
+ def __export_diagnostics_http(self):
+ payload = self.get_payload_details()
+ export_status = self.__export_diagnostics(payload)
+ share = self.module.params.get('share_parameters')
+ ip = config_ipv6(share.get('ip_address'))
+ self.share_name = f"{share.get('share_type')}://{ip}/{share.get('share_name').strip('/')}"
+ return export_status
+
+ def __export_diagnostics_cifs(self):
+ payload = self.get_payload_details()
+ if self.module.params.get('share_parameters').get('workgroup'):
+ payload["Workgroup"] = self.module.params.get('share_parameters').get('workgroup')
+ export_status = self.__export_diagnostics(payload)
+ share_name = self.module.params.get('share_parameters').get('share_name').replace("\\", "/")
+ self.share_name = f"//{self.module.params.get('share_parameters').get('ip_address')}/{share_name.strip('/')}"
+ return export_status
+
+ def __export_diagnostics_nfs(self):
+ payload = self.get_payload_details()
+ del payload["UserName"], payload["Password"]
+ export_status = self.__export_diagnostics(payload)
+ share = self.module.params.get('share_parameters')
+ self.share_name = f"{share.get('ip_address')}:/{share.get('share_name').strip('/')}"
+ return export_status
+
+ def __get_export_diagnostics_url(self):
+ uri, error_msg = validate_and_get_first_resource_id_uri(
+ self.module, self.idrac, MANAGERS_URI)
+ if error_msg:
+ self.module.exit_json(msg=error_msg, failed=True)
+ resp = get_dynamic_uri(self.idrac, uri)
+ url = resp.get('Links', {}).get(OEM, {}).get(MANUFACTURER, {}).get(LC_SERVICE, {}).get(ODATA, {})
+ if url:
+ action_resp = get_dynamic_uri(self.idrac, url)
+ export_url = action_resp.get(ACTIONS, {}).get(EXPORT, {}).get('target', {})
+ self.export_url = export_url
+ else:
+ self.module.exit_json(msg=UNSUPPORTED_FIRMWARE_MSG, failed=True)
+
+ def __export_diagnostics(self, payload):
+ diagnostics_file_name = self.module.params.get('share_parameters').get('file_name')
+ if not diagnostics_file_name:
+ now = datetime.now()
+ hostname = self.module.params.get('idrac_ip')
+ hostname = self.expand_ipv6(hostname)
+ hostname = hostname.replace(":", ".")
+ diagnostics_file_name = f"{hostname}_{now.strftime(TIME_FORMAT_FILE)}.txt"
+ payload["FileName"] = diagnostics_file_name
+ self.file_name = diagnostics_file_name
+ diagnostics_status = self.idrac.invoke_request(self.export_url, "POST", data=payload)
+ return diagnostics_status
+
+ def get_job_status(self, export_diagnostics_status):
+ res_uri = validate_and_get_first_resource_id_uri(self.module, self.idrac, MANAGERS_URI)
+ job_tracking_uri = export_diagnostics_status.headers.get("Location")
+ job_id = job_tracking_uri.split("/")[-1]
+ job_uri = f"{res_uri[0]}/{OEM}/{MANUFACTURER}/{JOBS}/{job_id}"
+ job_failed, msg, job_dict, wait_time = idrac_redfish_job_tracking(self.idrac, job_uri)
+ job_dict = remove_key(job_dict, regex_pattern=ODATA_REGEX)
+ if job_failed:
+ self.module.exit_json(msg=job_dict.get('Message'), failed=True, job_details=job_dict)
+ return job_dict
+
+ def perform_check_mode(self):
+ try:
+ payload = {}
+ payload['ShareType'] = 'Local'
+ export_status = self.idrac.invoke_request(self.export_url, "POST", data=payload)
+ if export_status.status_code in STATUS_SUCCESS:
+ self.module.exit_json(msg=CHANGES_FOUND_MSG, changed=True)
+ except HTTPError as err:
+ filter_err = remove_key(json.load(err), regex_pattern=ODATA_REGEX)
+ message_details = filter_err.get('error').get(MESSAGE_EXTENDED_INFO)[0]
+ message_id = message_details.get('MessageId')
+ if 'SYS099' in message_id:
+ self.module.exit_json(msg=NO_FILE, skipped=True)
+
+ def expand_ipv6(self, ip):
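+        # Expands a shortened IPv6 address to eight zero-padded groups so it can
+        # be embedded in an export file name, e.g.
+        # "2001:db8::1" -> "2001:0db8:0000:0000:0000:0000:0000:0001".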
+ sections = ip.split(':')
+ num_sections = len(sections)
+ double_colon_index = sections.index('') if '' in sections else -1
+ if double_colon_index != -1:
+ missing_sections = 8 - num_sections + 1
+ sections[double_colon_index:double_colon_index + 1] = ['0000'] * missing_sections
+ sections = [section.zfill(4) for section in sections]
+ expanded_ip = ':'.join(sections)
+ return expanded_ip
+
+
+class RunAndExportDiagnostics:
+
+ def __init__(self, idrac, module):
+ self.run = RunDiagnostics(idrac, module)
+ self.export = ExportDiagnostics(idrac, module)
+ self.module = module
+
+ def execute(self):
+ msg, job_status, file_path = self.run.execute()
+ if self.module.params.get("job_wait"):
+ msg, job_status, file_path = self.export.execute()
+ msg = SUCCESS_RUN_AND_EXPORT_MSG
+ return msg, job_status, file_path
+
+
+class DiagnosticsType:
+ _diagnostics_classes = {
+ "run": RunDiagnostics,
+ "export": ExportDiagnostics,
+ "run_and_export": RunAndExportDiagnostics
+ }
+
+ @staticmethod
+ def diagnostics_operation(idrac, module):
+ class_type = None
+ if module.params.get("run") and module.params.get("export"):
+ class_type = "run_and_export"
+ elif module.params.get("run"):
+ class_type = "run"
+ elif module.params.get("export"):
+ class_type = "export"
+ if class_type:
+ diagnostics_class = DiagnosticsType._diagnostics_classes.get(class_type)
+ return diagnostics_class(idrac, module)
+ else:
+ module.exit_json(msg=NO_OPERATION_SKIP_MSG, skipped=True)
+
+
+def main():
+ specs = get_argument_spec()
+ specs.update(idrac_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ required_one_of=[["run", "export"]],
+ required_if=[
+ ["run", True, ("reboot_type", "run_mode",)],
+ ["export", True, ("share_parameters",)]
+ ],
+ supports_check_mode=True
+ )
+
+ try:
+ with iDRACRedfishAPI(module.params) as idrac:
+ diagnostics_obj = DiagnosticsType.diagnostics_operation(idrac, module)
+ msg, job_status, file_path = diagnostics_obj.execute()
+ if file_path is None:
+ module.exit_json(msg=msg, changed=True, job_details=job_status)
+ module.exit_json(msg=msg, changed=True, job_details=job_status, diagnostics_file_path=file_path)
+ except HTTPError as err:
+ filter_err = remove_key(json.load(err), regex_pattern=ODATA_REGEX)
+ message_details = filter_err.get('error').get(MESSAGE_EXTENDED_INFO)[0]
+ message_id = message_details.get('MessageId')
+ if 'SYS099' in message_id:
+ module.exit_json(msg=NO_FILE, skipped=True)
+ if 'SYS098' in message_id:
+ module.exit_json(msg=message_details.get('Message'), skipped=True)
+ module.exit_json(msg=str(err), error_info=filter_err, failed=True)
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (OSError, ValueError, SSLValidationError, ConnectionError, TypeError) as e:
+ module.exit_json(msg=str(e), failed=True)
+
+
+def get_argument_spec():
+ return {
+ "run": {"type": 'bool'},
+ "export": {"type": 'bool'},
+ "run_mode": {
+ "type": 'str',
+ "default": 'express',
+ "choices": ['express', 'extended', 'long_run']
+ },
+ "reboot_type": {
+ "type": 'str',
+ "default": 'graceful',
+ "choices": ['force', 'graceful', 'power_cycle']
+ },
+ "scheduled_start_time": {"type": 'str'},
+ "scheduled_end_time": {"type": 'str'},
+ "job_wait": {"type": 'bool', "default": True},
+ "job_wait_timeout": {"type": 'int', "default": 1200},
+ "share_parameters": {
+ "type": 'dict',
+ "options": {
+ "share_type": {
+ "type": 'str',
+ "default": 'local',
+ "choices": ['local', 'nfs', 'cifs', 'http', 'https']
+ },
+ "proxy_type": {
+ "type": 'str',
+ "default": 'http',
+ "choices": ['http', 'socks']
+ },
+ "username": {"type": 'str'},
+ "password": {"type": 'str', "no_log": True},
+ "proxy_port": {"type": 'int', "default": 80},
+ "file_name": {"type": 'str'},
+ "ignore_certificate_warning": {
+ "type": 'str',
+ "default": "off",
+ "choices": ["off", "on"]
+ },
+ "ip_address": {"type": 'str'},
+ "proxy_server": {"type": 'str'},
+ "workgroup": {"type": 'str'},
+ "proxy_support": {
+ "type": 'str',
+ "default": "off",
+ "choices": ["off", "default_proxy", "parameters_proxy"]
+ },
+ "share_name": {"type": 'str'},
+ "proxy_username": {"type": 'str'},
+ "proxy_password": {"type": 'str', "no_log": True}
+ },
+ "required_together": [
+ ("username", "password"),
+ ("proxy_username", "proxy_password")
+ ],
+ "required_if": [
+ ["share_type", "local", ["share_name"]],
+ ["share_type", "nfs", ["ip_address", "share_name"]],
+ ["share_type", "cifs", ["ip_address", "share_name", "username", "password"]],
+ ["share_type", "http", ["ip_address", "share_name"]],
+ ["share_type", "https", ["ip_address", "share_name"]],
+ ["proxy_support", "parameters_proxy", ["proxy_server"]]
+ ],
+ },
+ "resource_id": {"type": 'str'}
+ }
+
+
+if __name__ == '__main__':
+ main()
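
Illustrative sketch (not part of the patch) of how the module selects its
operation; the stub names mirror DiagnosticsType.diagnostics_operation():

    OPERATIONS = {
        (True, True): "run_and_export",
        (True, False): "run",
        (False, True): "export",
    }

    def pick_operation(run, export):
        # Both flags select the combined handler, one flag selects its own
        # handler, and neither flag makes the module exit as skipped.
        return OPERATIONS.get((bool(run), bool(export)))

    assert pick_operation(True, True) == "run_and_export"
    assert pick_operation(True, False) == "run"
    assert pick_operation(False, False) is None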
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py
index 2c28c9a5f..940f86dc2 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py
@@ -3,64 +3,152 @@
#
# Dell OpenManage Ansible Modules
-# Version 7.1.0
-# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 9.2.0
+# Copyright (C) 2018-2024 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = """
+DOCUMENTATION = r'''
---
module: idrac_reset
-short_description: Reset iDRAC
+short_description: Factory reset the iDRACs
version_added: "2.1.0"
description:
- - This module resets iDRAC.
- - "iDRAC is not accessible for some time after running this module. It is recommended to wait for some time,
- before trying to connect to iDRAC."
+ - This module resets the iDRAC to factory default settings.
extends_documentation_fragment:
- dellemc.openmanage.idrac_auth_options
+options:
+ reset_to_default:
+ type: str
+ description:
+ - If this value is not set the default behaviour is to restart the iDRAC.
+      - If this value is not set, the default behavior is to restart the iDRAC.
+ - C(ResetAllWithRootDefaults) Discards all settings and reset the default username to root and password to the shipping value.
+ - C(Default) Discards all settings, but preserves user and network settings.
+      - C(CustomDefaults) All configuration is set to custom defaults. This option is supported on firmware version 7.00.00.00 and newer versions.
+ choices: ['Default', 'All', 'ResetAllWithRootDefaults', 'CustomDefaults']
+ version_added: 9.2.0
+ custom_defaults_file:
+ description:
+ - Name of the custom default configuration file in the XML format.
+ - This option is applicable when I(reset_to_default) is C(CustomDefaults).
+ - I(custom_defaults_file) is mutually exclusive with I(custom_defaults_buffer).
+ type: str
+ version_added: 9.2.0
+ custom_defaults_buffer:
+ description:
+ - This parameter provides the option to import the buffer input in XML format as a custom default configuration.
+ - This option is applicable when I(reset_to_default) is C(CustomDefaults).
+ - I(custom_defaults_buffer) is mutually exclusive with I(custom_defaults_file).
+ type: str
+ version_added: 9.2.0
+ wait_for_idrac:
+ description:
+      - This parameter provides the option to wait for the iDRAC to reset and the Lifecycle Controller status to be ready.
+ type: bool
+ default: true
+ version_added: 9.2.0
+ job_wait_timeout:
+ description:
+ - Time in seconds to wait for job completion.
+      - This is applicable when I(wait_for_idrac) is C(true).
+ type: int
+ default: 600
+ version_added: 9.2.0
+ force_reset:
+ description:
+ - This parameter provides the option to force reset the iDRAC without checking the iDRAC lifecycle controller status.
+ - This option is applicable only for iDRAC9.
+ type: bool
+ default: false
+ version_added: 9.2.0
requirements:
- - "omsdk >= 1.2.488"
- "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Anooja Vardhineni (@anooja-vardhineni)"
+ - "Lovepreet Singh (@singh-lovepreet1)"
notes:
- Run this module from a system that has direct access to Dell iDRAC.
- This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
-"""
+  - If I(reset_to_default) is not specified, then this module triggers a graceful restart.
+ - This module skips the execution if reset options are not supported by the iDRAC.
+'''
-EXAMPLES = """
+EXAMPLES = r'''
---
-- name: Reset iDRAC
+- name: Reset the iDRAC to all and wait until the iDRAC is accessible.
+ dellemc.openmanage.idrac_reset:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_to_default: "All"
+
+- name: Reset the iDRAC to default and do not wait until the iDRAC is accessible.
+ dellemc.openmanage.idrac_reset:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_to_default: "Default"
+ wait_for_idrac: false
+
+- name: Force reset the iDRAC to default.
+ dellemc.openmanage.idrac_reset:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_to_default: "Default"
+ force_reset: true
+
+- name: Gracefully restart the iDRAC.
+ dellemc.openmanage.idrac_reset:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Reset the iDRAC to custom defaults XML and do not wait until the iDRAC is accessible.
+ dellemc.openmanage.idrac_reset:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_to_default: "CustomDefaults"
+    custom_defaults_file: "/path/to/custom_defaults.xml"
+    wait_for_idrac: false
+
+- name: Reset the iDRAC to custom defaults buffer input and do not wait until the iDRAC is accessible.
dellemc.openmanage.idrac_reset:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- idrac_port: 443
- ca_path: "/path/to/ca_cert.pem"
-"""
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_to_default: "CustomDefaults"
+ custom_defaults_buffer: "<SystemConfiguration Model=\"PowerEdge R7525\" ServiceTag=\"ABCD123\">\n<Component FQDD=\"iDRAC.Embedded.1\">\n
+ <Attribute Name=\"IPMILan.1#Enable\">Disabled</Attribute>\n </Component>\n\n</SystemConfiguration>"
+'''
RETURN = r'''
---
msg:
+ type: str
description: Status of the iDRAC reset operation.
returned: always
- type: str
sample: "Successfully performed iDRAC reset."
reset_status:
- description: Details of iDRAC reset operation.
- returned: always
- type: dict
- sample: {
- "idracreset": {
+ type: dict
+ description: Details of iDRAC reset operation.
+  returned: when reset operation is triggered
+ sample: {
+ "idracreset": {
"Data": {
"StatusCode": 204
},
@@ -92,41 +180,382 @@ error_info:
}
'''
-
+import os
import json
-from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+import time
+from urllib.error import HTTPError, URLError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.compat.version import LooseVersion
from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import (
+ get_idrac_firmware_version, remove_key, get_dynamic_uri, validate_and_get_first_resource_id_uri, idrac_redfish_job_tracking)
+
+
+MANAGERS_URI = "/redfish/v1/Managers"
+OEM = "Oem"
+MANUFACTURER = "Dell"
+ACTIONS = "Actions"
+IDRAC_RESET_RETRIES = 50
+LC_STATUS_CHECK_SLEEP = 30
+IDRAC_JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{job_id}"
+RESET_TO_DEFAULT_ERROR = "{reset_to_default} is not supported. The supported values are {supported_values}. Enter the valid values and retry the operation."
+RESET_TO_DEFAULT_ERROR_MSG = "{reset_to_default} is not supported."
+CUSTOM_ERROR = "{reset_to_default} is not supported on this firmware version of iDRAC. The supported values are {supported_values}. \
+Enter the valid values and retry the operation."
+IDRAC_RESET_RESTART_SUCCESS_MSG = "iDRAC restart operation completed successfully."
+IDRAC_RESET_SUCCESS_MSG = "Successfully performed iDRAC reset."
+IDRAC_RESET_RESET_TRIGGER_MSG = "iDRAC reset operation triggered successfully."
+IDRAC_RESET_RESTART_TRIGGER_MSG = "iDRAC restart operation triggered successfully."
+INVALID_DIRECTORY_MSG = "Provided file path '{path}' is invalid."
+FAILED_RESET_MSG = "Failed to perform the reset operation."
+RESET_UNTRACK = "iDRAC reset is in progress. Changes will apply once the iDRAC reset operation is successfully completed."
+TIMEOUT_NEGATIVE_OR_ZERO_MSG = "The value of `job_wait_timeout` parameter cannot be negative or zero. Enter the valid value and retry the operation."
+INVALID_FILE_MSG = "File extension is invalid. Supported extension for 'custom_defaults_file' is: .xml."
+LC_STATUS_MSG = "Lifecycle controller status check is {lc_status} after {retries} retries. Exiting the operation."
+INSUFFICIENT_DIRECTORY_PERMISSION_MSG = "Provided file path '{path}' is not readable. Please check if the file has appropriate permissions."
+UNSUPPORTED_LC_STATUS_MSG = "Lifecycle controller status check is not supported."
+MINIMUM_SUPPORTED_FIRMWARE_VERSION = "7.00.00"
+CHANGES_NOT_FOUND = "No changes found to commit!"
+CHANGES_FOUND = "Changes found to commit!"
+ODATA_ID = "@odata.id"
+ODATA_REGEX = "(.*?)@odata"
+ATTRIBUTE = "</Attribute>"
+SUCCESS_STATUS = "Success"
+FAILED_STATUS = "Failed"
+STATUS_SUCCESS = [200, 202, 204]
+ERR_STATUS_CODE = [400, 404]
+PASSWORD_CHANGE_OPTIONS = ['All', 'ResetAllWithRootDefaults']
+RESET_KEY = "Oem.#DellManager.ResetToDefaults"
+GRACEFUL_RESTART_KEY = "#Manager.Reset"
+
+
+class Validation():
+ def __init__(self, idrac, module):
+ self.idrac = idrac
+ self.module = module
+ self.base_uri = self.get_base_uri()
+
+    def get_base_uri(self):
+ uri, error_msg = validate_and_get_first_resource_id_uri(
+ self.module, self.idrac, MANAGERS_URI)
+ if error_msg:
+ self.module.exit_json(msg=error_msg, failed=True)
+ return uri
-def run_idrac_reset(idrac, module):
- if module.check_mode:
- msg = {'Status': 'Success', 'Message': 'Changes found to commit!', 'changes_applicable': True}
- else:
- idrac.use_redfish = True
- msg = idrac.config_mgr.reset_idrac()
- return msg
+
+    def validate_reset_options(self, api_key):
+ res = self.idrac.invoke_request(self.base_uri, "GET")
+ reset_to_default = self.module.params.get('reset_to_default')
+ key_list = api_key.split(".", 1)
+ is_valid = True
+ allowed_values = None
+ if key_list[0] in res.json_data["Actions"] and key_list[1] in res.json_data["Actions"][key_list[0]]:
+ reset_to_defaults_val = res.json_data["Actions"][key_list[0]][key_list[1]]
+ reset_type_values = reset_to_defaults_val["ResetType@Redfish.AllowableValues"]
+ allowed_values = reset_type_values
+ if reset_to_default not in reset_type_values:
+ is_valid = False
+ else:
+ is_valid = False
+ return allowed_values, is_valid
+
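+    # Illustrative sketch (not a captured response): validate_reset_options() above
+    # expects the manager resource to advertise the allowable reset values in a
+    # shape similar to the following, where the target path is also an assumption:
+    #   "Actions": {
+    #       "Oem": {
+    #           "#DellManager.ResetToDefaults": {
+    #               "ResetType@Redfish.AllowableValues": ["All", "Default", "ResetAllWithRootDefaults"],
+    #               "target": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/DellManager.ResetToDefaults"
+    #           }
+    #       }
+    #   }
+    # The advertised values vary by iDRAC model and firmware version.
+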
+ def validate_graceful_restart_option(self, api_key):
+ res = self.idrac.invoke_request(self.base_uri, "GET")
+ is_valid = True
+ if api_key in res.json_data["Actions"]:
+ reset_to_defaults_val = res.json_data["Actions"][api_key]
+ reset_type_values = reset_to_defaults_val["ResetType@Redfish.AllowableValues"]
+ if "GracefulRestart" not in reset_type_values:
+ is_valid = False
+ else:
+ is_valid = False
+ return is_valid
+
+ def validate_job_timeout(self):
+ if self.module.params.get("wait_for_idrac") and self.module.params.get("job_wait_timeout") <= 0:
+ self.module.exit_json(msg=TIMEOUT_NEGATIVE_OR_ZERO_MSG, failed=True)
+
+ def validate_path(self, file_path):
+ if not (os.path.exists(file_path)):
+ self.module.exit_json(msg=INVALID_DIRECTORY_MSG.format(path=file_path), failed=True)
+        if not os.access(file_path, os.R_OK):
+ self.module.exit_json(msg=INSUFFICIENT_DIRECTORY_PERMISSION_MSG.format(path=file_path), failed=True)
+
+ def validate_file_format(self, file_name):
+ if not (file_name.endswith(".xml")):
+ self.module.exit_json(msg=INVALID_FILE_MSG, failed=True)
+
+ def validate_custom_option(self, reset_to_default=None, allowed_choices=None):
+ url = None
+ resp = get_dynamic_uri(self.idrac, self.base_uri, OEM)
+ if resp:
+ url = resp.get(MANUFACTURER, {}).get('CustomDefaultsDownloadURI', {})
+ try:
+ if url:
+ self.idrac.invoke_request(url, "GET")
+ return True
+ return False
+ except HTTPError as err:
+ if err.code in ERR_STATUS_CODE:
+ self.module.exit_json(msg=RESET_TO_DEFAULT_ERROR.format(reset_to_default=reset_to_default, supported_values=allowed_choices), skipped=True)
+
+
+class FactoryReset():
+ def __init__(self, idrac, module, allowed_choices):
+ self.idrac = idrac
+ self.module = module
+ self.allowed_choices = allowed_choices
+ self.reset_to_default = self.module.params.get('reset_to_default')
+ self.force_reset = self.module.params.get('force_reset')
+ self.wait_for_idrac = self.module.params.get('wait_for_idrac')
+ self.validate_obj = Validation(self.idrac, self.module)
+ self.uri = self.validate_obj.base_uri
+ self.idrac_firmware_version = get_idrac_firmware_version(self.idrac)
+
+ def execute(self):
+ msg_res, job_res = None, None
+ self.validate_obj.validate_job_timeout()
+ is_idrac9 = self.is_check_idrac_latest()
+ if not is_idrac9 and self.reset_to_default:
+ allowed_values, is_valid_option = self.validate_obj.validate_reset_options(RESET_KEY)
+ if self.module.check_mode and not is_valid_option:
+ self.module.exit_json(msg=CHANGES_NOT_FOUND)
+ if not is_valid_option:
+ self.module.exit_json(msg=RESET_TO_DEFAULT_ERROR_MSG.format(reset_to_default=self.reset_to_default),
+ skipped=True)
+ if self.module.check_mode:
+ self.check_mode_output(is_idrac9)
+ if is_idrac9 and not self.force_reset:
+ self.check_lcstatus(post_op=False)
+ reset_status_mapping = {key: self.reset_to_default_mapped for key in ['Default', 'All', 'ResetAllWithRootDefaults']}
+ reset_status_mapping.update({
+ 'CustomDefaults': self.reset_custom_defaults,
+ 'None': self.graceful_restart
+ })
+ msg_res, job_res = reset_status_mapping[str(self.reset_to_default)]()
+ if is_idrac9 and self.wait_for_idrac:
+ self.check_lcstatus()
+ return msg_res, job_res
+
+ def check_mode_output(self, is_idrac9):
+ if is_idrac9 and self.reset_to_default == 'CustomDefaults' and LooseVersion(self.idrac_firmware_version) < MINIMUM_SUPPORTED_FIRMWARE_VERSION:
+ self.module.exit_json(msg=CHANGES_NOT_FOUND)
+ if self.reset_to_default:
+ allowed_values, is_valid_option = self.validate_obj.validate_reset_options(RESET_KEY)
+ else:
+ is_valid_option = self.validate_obj.validate_graceful_restart_option(GRACEFUL_RESTART_KEY)
+ custom_default_file = self.module.params.get('custom_defaults_file')
+ custom_default_buffer = self.module.params.get('custom_defaults_buffer')
+ if is_valid_option:
+ self.module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif self.reset_to_default and self.reset_to_default == 'CustomDefaults' and (custom_default_file or custom_default_buffer):
+ self.module.exit_json(msg=CHANGES_FOUND, changed=True)
+ else:
+ self.module.exit_json(msg=CHANGES_NOT_FOUND)
+
+ def is_check_idrac_latest(self):
+        return LooseVersion(self.idrac_firmware_version) >= '3.0'
+
+ def check_lcstatus(self, post_op=True):
+        if self.reset_to_default in PASSWORD_CHANGE_OPTIONS and post_op and self.status_code_after_wait == 401:
+ return
+ lc_status_dict = {}
+ lc_status_dict['LCStatus'] = ""
+ retry_count = 1
+ resp = get_dynamic_uri(self.idrac, self.uri, "Links")
+ url = resp.get(OEM, {}).get(MANUFACTURER, {}).get('DellLCService', {}).get(ODATA_ID, {})
+ if url:
+ action_resp = get_dynamic_uri(self.idrac, url)
+ lc_url = action_resp.get(ACTIONS, {}).get('#DellLCService.GetRemoteServicesAPIStatus', {}).get('target', {})
+ else:
+ self.module.exit_json(msg=UNSUPPORTED_LC_STATUS_MSG, failed=True)
+ while retry_count < IDRAC_RESET_RETRIES:
+ try:
+ lcstatus = self.idrac.invoke_request(lc_url, "POST", data="{}", dump=False)
+ lcstatus_data = lcstatus.json_data.get('LCStatus')
+ lc_status_dict['LCStatus'] = lcstatus_data
+ if lc_status_dict.get('LCStatus') == 'Ready':
+ break
+ time.sleep(10)
+ retry_count = retry_count + 1
+ except URLError:
+ time.sleep(10)
+ retry_count = retry_count + 1
+ if retry_count == IDRAC_RESET_RETRIES:
+ self.module.exit_json(msg=LC_STATUS_MSG.format(lc_status='unreachable', retries=IDRAC_RESET_RETRIES), unreachable=True)
+
+ if retry_count == IDRAC_RESET_RETRIES and lc_status_dict.get('LCStatus') != "Ready":
+ self.module.exit_json(msg=LC_STATUS_MSG.format(lc_status=lc_status_dict.get('LCStatus'), retries=retry_count), failed=True)
+
+ def create_output(self, status):
+ result = {}
+ tmp_res = {}
+ result['idracreset'] = {}
+ result['idracreset']['Data'] = {'StatusCode': status}
+ result['idracreset']['StatusCode'] = status
+ track_failed, wait_msg = None, None
+        self.status_code_after_wait = 202
+ if status in STATUS_SUCCESS:
+ if self.wait_for_idrac:
+ track_failed, status_code, wait_msg = self.wait_for_port_open()
+                self.status_code_after_wait = status_code
+ if track_failed:
+ self.module.exit_json(msg=wait_msg, changed=True)
+ tmp_res['msg'] = IDRAC_RESET_SUCCESS_MSG if self.wait_for_idrac else IDRAC_RESET_RESET_TRIGGER_MSG
+ tmp_res['changed'] = True
+ result['idracreset']['Message'] = IDRAC_RESET_SUCCESS_MSG if self.wait_for_idrac else IDRAC_RESET_RESET_TRIGGER_MSG
+ result['idracreset']['Status'] = 'Success'
+ result['idracreset']['retVal'] = True
+ else:
+ tmp_res['msg'] = FAILED_RESET_MSG
+ tmp_res['changed'] = False
+ result['idracreset']['Message'] = FAILED_RESET_MSG
+ result['idracreset']['Status'] = 'FAILED'
+ result['idracreset']['retVal'] = False
+ if self.reset_to_default:
+ result = None
+ return tmp_res, result
+
+ def perform_operation(self, payload):
+ tmp_res, res = None, None
+ url = None
+ resp = get_dynamic_uri(self.idrac, self.uri, ACTIONS)
+ if resp:
+ url = resp.get(OEM, {}).get('#DellManager.ResetToDefaults', {}).get('target', {})
+ run_reset_status = self.idrac.invoke_request(url, "POST", data=payload)
+ status = run_reset_status.status_code
+ tmp_res, res = self.create_output(status)
+ return tmp_res, res
+
+ def upload_cd_content(self, data):
+ payload = {"CustomDefaults": data}
+ job_wait_timeout = self.module.params.get('job_wait_timeout')
+ url = None
+ resp = get_dynamic_uri(self.idrac, self.uri, ACTIONS)
+ if resp:
+ url = resp.get(OEM, {}).get('#DellManager.SetCustomDefaults', {}).get('target', {})
+ job_resp = self.idrac.invoke_request(url, "POST", data=payload)
+ if (job_tracking_uri := job_resp.headers.get("Location")):
+ job_id = job_tracking_uri.split("/")[-1]
+ job_uri = IDRAC_JOB_URI.format(job_id=job_id)
+ job_failed, msg, job_dict, wait_time = idrac_redfish_job_tracking(self.idrac, job_uri,
+ max_job_wait_sec=job_wait_timeout,
+ sleep_interval_secs=1)
+ job_dict = remove_key(job_dict, regex_pattern='(.*?)@odata')
+ if job_failed:
+ self.module.exit_json(msg=job_dict.get("Message"), job_status=job_dict, failed=True)
+
+ def wait_for_port_open(self, interval=45):
+ timeout_wait = self.module.params.get('job_wait_timeout')
+ time.sleep(interval)
+ msg = RESET_UNTRACK
+ wait = timeout_wait
+ track_failed = True
+ status_code = 503
+ while int(wait) > 0:
+ try:
+ self.idrac.invoke_request(MANAGERS_URI, 'GET')
+ time.sleep(interval)
+ msg = IDRAC_RESET_SUCCESS_MSG
+ track_failed = False
+ status_code = 200
+ break
+ except HTTPError as err:
+ status_code = err.code
+ if status_code == 401:
+ time.sleep(interval // 2)
+ msg = IDRAC_RESET_SUCCESS_MSG
+ track_failed = False
+ break
+ except Exception:
+ time.sleep(interval)
+ wait = wait - interval
+ return track_failed, status_code, msg
+
+ def reset_to_default_mapped(self):
+ payload = {"ResetType": self.reset_to_default}
+ self.allowed_choices, is_valid_option = self.validate_obj.validate_reset_options(RESET_KEY)
+ if not is_valid_option:
+ self.module.exit_json(msg=RESET_TO_DEFAULT_ERROR.format(reset_to_default=self.reset_to_default, supported_values=self.allowed_choices),
+ skipped=True)
+ return self.perform_operation(payload)
+
+ def get_xml_content(self, file_path):
+ with open(file_path, 'r') as file:
+ xml_content = file.read()
+ return xml_content
+
+ def reset_custom_defaults(self):
+ self.allowed_choices, is_valid_option = self.validate_obj.validate_reset_options(RESET_KEY)
+ if LooseVersion(self.idrac_firmware_version) < MINIMUM_SUPPORTED_FIRMWARE_VERSION:
+ self.module.exit_json(msg=CUSTOM_ERROR.format(reset_to_default=self.reset_to_default,
+ supported_values=self.allowed_choices), skipped=True)
+ custom_default_file = self.module.params.get('custom_defaults_file')
+ custom_default_buffer = self.module.params.get('custom_defaults_buffer')
+        upload_perform = False
+ default_data = None
+ if custom_default_file:
+ self.validate_obj.validate_path(custom_default_file)
+ self.validate_obj.validate_file_format(custom_default_file)
+            upload_perform = True
+ default_data = self.get_xml_content(custom_default_file)
+ elif custom_default_buffer:
+            upload_perform = True
+ default_data = custom_default_buffer
+        if upload_perform:
+ self.upload_cd_content(default_data)
+ self.validate_obj.validate_custom_option(self.reset_to_default, self.allowed_choices)
+ return self.reset_to_default_mapped()
+
+ def graceful_restart(self):
+ url = None
+ resp = get_dynamic_uri(self.idrac, self.uri, ACTIONS)
+ if resp:
+ url = resp.get('#Manager.Reset', {}).get('target', {})
+ payload = {"ResetType": "GracefulRestart"}
+ run_reset_status = self.idrac.invoke_request(url, "POST", data=payload)
+ status = run_reset_status.status_code
+ tmp_res, resp = self.create_output(status)
+ if status in STATUS_SUCCESS:
+ tmp_res['msg'] = IDRAC_RESET_SUCCESS_MSG
+ resp['idracreset']['Message'] = IDRAC_RESET_RESTART_SUCCESS_MSG if self.wait_for_idrac else IDRAC_RESET_RESTART_TRIGGER_MSG
+ return tmp_res, resp
def main():
- specs = {}
+ specs = {
+ "reset_to_default": {"choices": ['All', 'ResetAllWithRootDefaults', 'Default', 'CustomDefaults']},
+ "custom_defaults_file": {"type": "str"},
+ "custom_defaults_buffer": {"type": "str"},
+ "wait_for_idrac": {"type": "bool", "default": True},
+ "job_wait_timeout": {"type": 'int', "default": 600},
+ "force_reset": {"type": "bool", "default": False}
+ }
specs.update(idrac_auth_params)
module = AnsibleModule(
argument_spec=specs,
+ mutually_exclusive=[("custom_defaults_file", "custom_defaults_buffer")],
supports_check_mode=True)
-
try:
- with iDRACConnection(module.params) as idrac:
- msg = run_idrac_reset(idrac, module)
+ with iDRACRedfishAPI(module.params) as idrac:
+ allowed_choices = specs['reset_to_default']['choices']
+ reset_obj = FactoryReset(idrac, module, allowed_choices)
+ message_resp, output = reset_obj.execute()
+ if output:
+ if not message_resp.get('changed'):
+ module.exit_json(msg=message_resp.get('msg'), reset_status=output, failed=True)
+ module.exit_json(msg=message_resp.get('msg'), reset_status=output, changed=True)
+ else:
+ if not message_resp.get('changed'):
+ module.exit_json(msg=message_resp.get('msg'), failed=True)
+ module.exit_json(msg=message_resp.get('msg'), changed=True)
except HTTPError as err:
- module.fail_json(msg=str(err), error_info=json.load(err))
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
- except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
- ImportError, ValueError, TypeError) as e:
- module.fail_json(msg=str(e))
- module.exit_json(msg="Successfully performed iDRAC reset.", reset_status=msg)
+ except (RuntimeError, SSLValidationError, ConnectionError, TypeError, KeyError, ValueError, OSError) as err:
+ module.exit_json(msg=str(err), failed=True)
if __name__ == '__main__':
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py
index bd7fe2c67..f89272ca3 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py
@@ -723,20 +723,15 @@ def export_scp_redfish(module, idrac):
share, scp_file_name_format = get_scp_share_details(module)
scp_components = ",".join(module.params["scp_components"])
include_in_export = IN_EXPORTS[module.params["include_in_export"]]
+ scp_response = idrac.export_scp(export_format=module.params["export_format"],
+ export_use=module.params["export_use"],
+ target=scp_components, include_in_export=include_in_export,
+ job_wait=False, share=share, ) # Assigning it as false because job tracking is done in idrac_redfish.py as well.
if share["share_type"] == "LOCAL":
- scp_response = idrac.export_scp(export_format=module.params["export_format"],
- export_use=module.params["export_use"],
- target=scp_components, include_in_export=include_in_export,
- job_wait=False, share=share, )
scp_response = wait_for_response(scp_response, module, share, idrac)
- else:
- scp_response = idrac.export_scp(export_format=module.params["export_format"],
- export_use=module.params["export_use"],
- target=scp_components, include_in_export=include_in_export,
- job_wait=False, share=share, ) # Assigning it as false because job tracking is done in idrac_redfish.py as well.
- scp_response = wait_for_job_tracking_redfish(
- module, idrac, scp_response
- )
+ scp_response = wait_for_job_tracking_redfish(
+ module, idrac, scp_response
+ )
scp_response = response_format_change(scp_response, module.params, scp_file_name_format)
exit_on_failure(module, scp_response, command)
return scp_response
@@ -753,8 +748,6 @@ def wait_for_response(scp_resp, module, share, idrac):
else:
wait_resp_value = wait_resp.decode("utf-8")
file_obj.write(wait_resp_value)
- if module.params["job_wait"]:
- scp_resp = idrac.invoke_request(job_uri, "GET")
return scp_resp
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_session.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_session.py
new file mode 100644
index 000000000..3303b4ade
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_session.py
@@ -0,0 +1,425 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 9.2.0
+# Copyright (C) 2024 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: idrac_session
+short_description: Manage iDRAC sessions
+version_added: "9.2.0"
+description:
+ - This module allows the creation and deletion of sessions on iDRAC.
+options:
+ hostname:
+ description:
+ - IP address or hostname of the iDRAC.
+ type: str
+ username:
+ description:
+ - Username of the iDRAC.
+ - I(username) is required when I(state) is C(present).
+ type: str
+ password:
+ description:
+ - Password of the iDRAC.
+ - I(password) is required when I(state) is C(present).
+ type: str
+ port:
+ description:
+ - Port of the iDRAC.
+ type: int
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ timeout:
+ description:
+      - The HTTPS socket level timeout in seconds.
+ type: int
+ default: 30
+ state:
+ description:
+ - The state of the session in an iDRAC.
+ - C(present) creates a session.
+ - C(absent) deletes a session.
+ - Module will always report changes found to be applied when I(state) is C(present).
+ choices: [present, absent]
+ type: str
+ default: present
+ auth_token:
+ description:
+ - Authentication token.
+ - I(auth_token) is required when I(state) is C(absent).
+ type: str
+ session_id:
+ description:
+ - Session ID of the iDRAC.
+ - I(session_id) is required when I(state) is C(absent).
+ type: int
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Rajshekar P(@rajshekarp87)"
+notes:
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports IPv4 and IPv6 addresses.
+ - This module supports C(check_mode).
+ - This module will always report changes found to be applied when I(state) is C(present).
+"""
+
+EXAMPLES = r"""
+---
+- name: Create a session
+ dellemc.openmanage.idrac_session:
+    hostname: 192.168.0.1
+ username: username
+ password: password
+ state: present
+
+- name: Delete a session
+ dellemc.openmanage.idrac_session:
+    hostname: 192.168.0.1
+ state: absent
+ auth_token: aed4aa802b748d2f3b31deec00a6b28a
+    session_id: 2
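+
+# A minimal end-to-end sketch: "session_result" is an assumed register name, while
+# x_auth_token and session_data.Id are documented return values of this module.
+- name: Create a session and register the result
+  dellemc.openmanage.idrac_session:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    state: present
+  register: session_result
+
+- name: Delete the session created above
+  dellemc.openmanage.idrac_session:
+    hostname: 192.168.0.1
+    state: absent
+    auth_token: "{{ session_result.x_auth_token }}"
+    session_id: "{{ session_result.session_data.Id }}"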
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Status of the session operation.
+ returned: always
+ type: str
+ sample: "The session has been created successfully."
+session_data:
+ description: The session details.
+ returned: For session creation operation
+ type: dict
+ sample: {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "The resource has been created successfully.",
+ "MessageArgs": [],
+ "MessageId": "Base.1.12.Created",
+ "RelatedProperties": [],
+ "Resolution": "None.",
+ "Severity": "OK"
+ },
+ {
+ "Message": "A new resource is successfully created.",
+ "MessageArgs": [],
+ "MessageId": "IDRAC.2.9.SYS414",
+ "RelatedProperties": [],
+ "Resolution": "No response action is required.",
+ "Severity": "Informational"
+ }
+ ],
+ "ClientOriginIPAddress": "100.96.37.58",
+ "CreatedTime": "2024-04-05T01:14:01-05:00",
+ "Description": "User Session",
+ "Id": "74",
+ "Name": "User Session",
+ "Password": null,
+ "SessionType": "Redfish",
+ "UserName": "root"
+ }
+x_auth_token:
+ description: Authentication token.
+ returned: For session creation operation
+ type: str
+ sample: "d15f17f01cd627c30173b1582642497d"
+error_info:
+ description: Details of the HTTP Error.
+ returned: On HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to complete the operation because an invalid username
+ and/or password is entered, and therefore authentication failed.",
+ "MessageArgs": [],
+ "MessageId": "IDRAC.2.9.SYS415",
+ "RelatedProperties": [],
+ "Resolution": "Enter valid user name and password and retry the operation.",
+ "Severity": "Warning"
+ }
+ ],
+ "code": "Base.1.12.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information"
+ }
+ }
+'''
+
+
+import json
+from urllib.error import HTTPError, URLError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.session_utils import SessionAPI
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.common.parameters import env_fallback
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import (
+ get_dynamic_uri, remove_key)
+
+REDFISH = "/redfish/v1"
+SESSIONS = "Sessions"
+ODATA = "@odata.id"
+ODATA_REGEX = "(.*?)@odata"
+
+CREATE_SUCCESS_MSG = "The session has been created successfully."
+DELETE_SUCCESS_MSG = "The session has been deleted successfully."
+FAILURE_MSG = "Unable to '{operation}' a session."
+CHANGES_FOUND_MSG = "Changes found to be applied."
+NO_CHANGES_FOUND_MSG = "No changes found to be applied."
+
+
+class Session():
+ """
+ Parent class for all session operations.
+ """
+ def __init__(self, idrac, module):
+ """
+ Initializes the object with the given idrac and module parameters.
+
+ Args:
+ idrac (object): The idrac object.
+ module (object): The module object.
+
+ Returns:
+ None
+ """
+ self.idrac = idrac
+ self.module = module
+
+ def get_session_url(self):
+ """
+ Retrieves the URL for the sessions endpoint from the Redfish API.
+
+ Returns:
+            str: The URL for the sessions endpoint, or an empty dict if not found.
+ """
+ v1_resp = get_dynamic_uri(self.idrac, REDFISH)
+ sessions_url = v1_resp.get('Links', {}).get(SESSIONS, {}).get(ODATA, {})
+ return sessions_url
+
+
+class CreateSession(Session):
+ """
+ Creates a session.
+ """
+ def execute(self):
+ """
+ Executes the session creation process.
+
+ This function creates a session by sending a POST request to the session URL with the
+ provided username and password.
+ If the request is successful (status code 201), it retrieves the session details, removes
+ any OData keys from the response,
+ and extracts the X-Auth-Token from the response headers. It then exits the module with a
+ success message, indicating that
+ the session was created successfully, and provides the session data and X-Auth-Token as
+ output variables.
+
+ If the request fails (status code other than 201), it exits the module with a failure
+ message, indicating that the session creation failed.
+
+ Parameters:
+ None
+
+ Returns:
+ None
+ """
+ payload = {"UserName": self.module.params.get("username"),
+ "Password": self.module.params.get("password")}
+ session_url = self.get_session_url()
+ if self.module.check_mode:
+ self.module.exit_json(msg=CHANGES_FOUND_MSG, changed=True)
+ session_response = self.idrac.invoke_request(session_url, "POST", data=payload)
+ status = session_response.status_code
+ if status == 201:
+ session_details = session_response.json_data
+ session_data = remove_key(session_details, regex_pattern=ODATA_REGEX)
+ x_auth_token = session_response.headers.get('X-Auth-Token')
+ self.module.exit_json(msg=CREATE_SUCCESS_MSG,
+ changed=True,
+ session_data=session_data,
+ x_auth_token=x_auth_token)
+ else:
+ self.module.exit_json(msg=FAILURE_MSG.format(operation="create"), failed=True)
+
+
+class DeleteSession(Session):
+ """
+ Deletes a session.
+ """
+ def execute(self):
+ """
+ Executes the deletion of a session.
+
+ This function retrieves the session ID from the module parameters and constructs the
+ session URL using the `get_session_url` method. It then invokes a DELETE request to the
+ session URL with the session ID appended. The response from the request is stored in the
+ `session_response` variable.
+
+ If the response status code is 200, indicating a successful deletion, the function exits
+ the module with a success message and sets the `changed` parameter to True. Otherwise, it
+ exits the module with a failure message and sets the `failed` parameter to True.
+
+ Parameters:
+ None
+
+ Returns:
+ None
+ """
+ session_id = self.module.params.get("session_id")
+ session_url = self.get_session_url()
+ session_status = self.get_session_status(session_url, session_id)
+ if self.module.check_mode:
+ if session_status == 200:
+ self.module.exit_json(msg=CHANGES_FOUND_MSG, changed=True)
+ else:
+ self.module.exit_json(msg=NO_CHANGES_FOUND_MSG)
+ else:
+ if session_status == 200:
+ try:
+ session_response = self.idrac.invoke_request(session_url + f"/{session_id}",
+ "DELETE")
+ status = session_response.status_code
+ if status == 200:
+ self.module.exit_json(msg=DELETE_SUCCESS_MSG, changed=True)
+ except HTTPError as err:
+ filter_err = remove_key(json.load(err), regex_pattern=ODATA_REGEX)
+ self.module.exit_json(msg=FAILURE_MSG.format(operation="delete"),
+ error_info=filter_err,
+ failed=True)
+ else:
+ self.module.exit_json(msg=NO_CHANGES_FOUND_MSG)
+
+ def get_session_status(self, session_url, session_id):
+ """
+ Retrieves the status of a session given its URL and ID.
+
+ Args:
+ session_url (str): The URL of the session.
+            session_id (int): The ID of the session.
+
+ Returns:
+ int: The status code of the session status response. If an HTTPError occurs, the status
+ code of the error is returned.
+ """
+ try:
+ session_status_response = self.idrac.invoke_request(session_url + f"/{session_id}",
+ "GET")
+ session_status = session_status_response.status_code
+ except HTTPError as err:
+ session_status = err.status
+ return session_status
+
+
+def main():
+ """
+ Main function that initializes the Ansible module with the argument specs and required if
+ conditions.
+ It then creates a SessionAPI object with the module parameters and performs a session operation
+ based on the state parameter.
+ If the state is "present", it creates a CreateSession object and executes it. If the state is
+ "absent", it creates a DeleteSession object and executes it.
+ The session status is returned.
+
+ Raises:
+ HTTPError: If an HTTP error occurs, the error message and filtered error information are
+ returned in the module's exit_json.
+ URLError: If a URL error occurs, the error message is returned in the module's exit_json.
+ SSLValidationError, ConnectionError, TypeError, ValueError, OSError: If any other error
+ occurs, the error message is returned in the module's exit_json.
+
+ Returns:
+ None
+ """
+ specs = get_argument_spec()
+ module = AnsibleModule(
+ argument_spec=specs,
+ required_if=[
+ ["state", "present", ("username", "password",)],
+ ["state", "absent", ("auth_token", "session_id",)]
+ ],
+ supports_check_mode=True
+ )
+
+ try:
+ idrac = SessionAPI(module.params)
+ session_operation = module.params.get("state")
+ if session_operation == "present":
+ session_operation_obj = CreateSession(idrac, module)
+ else:
+ session_operation_obj = DeleteSession(idrac, module)
+ session_operation_obj.execute()
+ except HTTPError as err:
+ filter_err = remove_key(json.load(err), regex_pattern=ODATA_REGEX)
+ module.exit_json(msg=str(err), error_info=filter_err, failed=True)
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError) as err:
+ module.exit_json(msg=str(err), failed=True)
+
+
+def get_argument_spec():
+ """
+ Returns a dictionary representing the argument specification for a module.
+
+ The dictionary contains the following keys and their corresponding values:
+ - "hostname": A string representing the hostname.
+ - "username": A string representing the username. It has a fallback option to retrieve the
+ value from the environment variable 'IDRAC_USERNAME'.
+ - "password": A string representing the password. It is marked as not to be logged and has a
+ fallback option to retrieve the value from the environment variable 'IDRAC_PASSWORD'.
+ - "port": An integer representing the port number. The default value is 443.
+ - "validate_certs": A boolean representing whether to validate certificates. The default value
+ is True.
+ - "ca_path": A path representing the certificate authority path. The default value is None.
+ - "timeout": An integer representing the timeout value. The default value is 30.
+ - "state": A string representing the state. The default value is "present". The choices are
+ ["present", "absent"].
+ - "auth_token": A string representing the authentication token. It is marked as not to be
+ logged.
+ - "session_id": An integer representing the session ID.
+
+ Returns:
+ A dictionary representing the argument specification.
+ """
+ return {
+ "hostname": {"type": "str"},
+ "username": {"type": "str", "fallback": (env_fallback, ['IDRAC_USERNAME'])},
+ "password": {"type": "str", "no_log": True, "fallback": (env_fallback, ['IDRAC_PASSWORD'])},
+ "port": {"type": "int", "default": 443},
+ "validate_certs": {"type": "bool", "default": True},
+ "ca_path": {"type": "path", "default": None},
+ "timeout": {"type": "int", "default": 30},
+ "state": {"type": 'str', "default": "present", "choices": ["present", "absent"]},
+ "auth_token": {"type": "str", "no_log": True},
+ "session_id": {"type": "int"}
+ }
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_storage_volume.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_storage_volume.py
new file mode 100644
index 000000000..13c1ec065
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_storage_volume.py
@@ -0,0 +1,924 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 9.1.0
+# Copyright (C) 2024 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: idrac_storage_volume
+short_description: Configures the RAID configuration attributes
+version_added: "9.1.0"
+description:
+ - This module is responsible for configuring the RAID attributes.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ state:
+ type: str
+ description:
+ - C(create), performs create volume operation.
+ - C(delete), performs remove volume operation.
+ - C(view), returns storage view.
+ choices: ['create', 'delete', 'view']
+ default: 'view'
+ span_depth:
+ type: int
+ description:
+ - Number of spans in the RAID configuration.
+ - I(span_depth) is required for C(create) and its value depends on I(volume_type).
+ default: 1
+ span_length:
+ type: int
+ description:
+ - Number of disks in a span.
+ - I(span_length) is required for C(create) and its value depends on I(volume_type).
+ default: 1
+ number_dedicated_hot_spare:
+ type: int
+    description: Number of dedicated hot spares.
+ default: 0
+ volume_type:
+ type: str
+    description: Provide the required RAID level.
+ choices: ['RAID 0', 'RAID 1', 'RAID 5', 'RAID 6', 'RAID 10', 'RAID 50', 'RAID 60']
+ default: 'RAID 0'
+ disk_cache_policy:
+ type: str
+ description: Disk Cache Policy.
+ choices: ["Default", "Enabled", "Disabled"]
+ default: "Default"
+ write_cache_policy:
+ type: str
+ description: Write cache policy.
+ choices: ["WriteThrough", "WriteBack", "WriteBackForce"]
+ default: "WriteThrough"
+ read_cache_policy:
+ type: str
+ description: Read cache policy.
+ choices: ["NoReadAhead", "ReadAhead", "AdaptiveReadAhead"]
+ default: "NoReadAhead"
+ stripe_size:
+ type: int
+ description: Stripe size value to be provided in multiples of 64 * 1024.
+ default: 65536
+ controller_id:
+ type: str
+ description:
+ - Fully Qualified Device Descriptor (FQDD) of the storage controller, for example 'RAID.Integrated.1-1'.
+ Controller FQDD is required for C(create) RAID configuration.
+ media_type:
+ type: str
+ description: Media type.
+ choices: ['HDD', 'SSD']
+ protocol:
+ type: str
+ description: Bus protocol.
+ choices: ['SAS', 'SATA', 'PCIE']
+ volume_id:
+ type: str
+ description:
+ - Fully Qualified Device Descriptor (FQDD) of the virtual disk, for example 'Disk.virtual.0:RAID.Slot.1-1'.
+ This option is used to get the virtual disk information.
+ volumes:
+ type: list
+ elements: dict
+ description:
+ - A list of virtual disk specific iDRAC attributes. This is applicable for C(create) and C(delete) operations.
+ - For C(create) operation, name and drives are applicable options, other volume options can also be specified.
+      - I(drives) is a required option for C(create) operation and accepts either location (list of drive slots)
+        or id (list of drive FQDDs).
+ - In iDRAC8, there is no pre-validation for the state of drives. The disk ID or slot number of the drive
+ provided may or may not be in Ready state. Enter the disk ID or slot number of the drive that is already
+ in Ready state.
+ - For C(delete) operation, only name option is applicable.
+ - See the examples for more details.
+ capacity:
+ type: float
+ description: Virtual disk size in GB.
+ raid_reset_config:
+ type: str
+ description:
+ - This option represents whether a reset config operation needs to be performed on the RAID controller.
+ Reset Config operation deletes all the virtual disks present on the RAID controller.
+ choices: ['true', 'false']
+ default: 'false'
+ raid_init_operation:
+ type: str
+ description: This option represents initialization configuration operation to be performed on the virtual disk.
+ choices: [None, Fast]
+ job_wait:
+ description:
+ - This parameter provides the option to wait for the job completion.
+ - This is applicable when I(state) is C(create) or C(delete).
+ type: bool
+ default: true
+ job_wait_timeout:
+ description:
+ - This parameter is the maximum wait time of I(job_wait) in seconds.
+ - This option is applicable when I(job_wait) is C(true).
+ type: int
+ default: 900
+
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Felix Stephen (@felixs88)"
+ - "Kritika Bhateja (@Kritika-Bhateja-03)"
+ - "Abhishek Sinha(@ABHISHEK-SINHA10)"
+notes:
+ - Run this module from a system that has direct access to Integrated Dell Remote Access Controller.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
+ - This module supports C(check_mode).
+ - This module does not display the controller battery details for the C(view) operation of the storage in iDRAC8.
+'''
+
+EXAMPLES = r'''
+---
+- name: Create single volume
+ dellemc.openmanage.idrac_storage_volume:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "create"
+ controller_id: "RAID.Slot.1-1"
+ volumes:
+ - drives:
+ location: [5]
+
+- name: Create multiple volume
+ dellemc.openmanage.idrac_storage_volume:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ raid_reset_config: "True"
+ state: "create"
+ controller_id: "RAID.Slot.1-1"
+ volume_type: "RAID 1"
+ span_depth: 1
+ span_length: 2
+ number_dedicated_hot_spare: 1
+ disk_cache_policy: "Enabled"
+ write_cache_policy: "WriteBackForce"
+ read_cache_policy: "ReadAhead"
+ stripe_size: 65536
+ capacity: 100
+ raid_init_operation: "Fast"
+ volumes:
+ - name: "volume_1"
+ drives:
+ id: ["Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1", "Disk.Bay.2:Enclosure.Internal.0-1:RAID.Slot.1-1"]
+ - name: "volume_2"
+ volume_type: "RAID 5"
+ span_length: 3
+ span_depth: 1
+ drives:
+ location: [7, 3, 5]
+ disk_cache_policy: "Disabled"
+ write_cache_policy: "WriteBack"
+ read_cache_policy: "NoReadAhead"
+ stripe_size: 131072
+ capacity: "200"
+ raid_init_operation: "None"
+
+- name: View all volume details
+ dellemc.openmanage.idrac_storage_volume:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "view"
+
+- name: View specific volume details
+ dellemc.openmanage.idrac_storage_volume:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "view"
+ controller_id: "RAID.Slot.1-1"
+ volume_id: "Disk.Virtual.0:RAID.Slot.1-1"
+
+- name: Delete single volume
+ dellemc.openmanage.idrac_storage_volume:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "delete"
+ volumes:
+ - name: "volume_1"
+
+- name: Delete multiple volume
+ dellemc.openmanage.idrac_storage_volume:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "delete"
+ volumes:
+ - name: "volume_1"
+ - name: "volume_2"
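+
+# A hypothetical variant: trigger the delete operation without waiting for the
+# configuration job to complete; job_wait is a documented option of this module.
+- name: Delete a volume without waiting for the job completion
+  dellemc.openmanage.idrac_storage_volume:
+    idrac_ip: "192.168.0.1"
+    idrac_user: "username"
+    idrac_password: "password"
+    ca_path: "/path/to/ca_cert.pem"
+    state: "delete"
+    job_wait: false
+    volumes:
+      - name: "volume_1"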
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall status of the storage configuration operation.
+ returned: always
+ sample: "Successfully completed the view storage volume operation"
+storage_status:
+ type: dict
+ description: Storage configuration job and progress details from the iDRAC.
+ returned: success
+ sample:
+ {
+ "Id": "JID_XXXXXXXXX",
+ "JobState": "Completed",
+ "JobType": "ImportConfiguration",
+ "Message": "Successfully imported and applied Server Configuration Profile.",
+ "MessageId": "XXX123",
+ "Name": "Import Configuration",
+ "PercentComplete": 100,
+ "StartTime": "TIME_NOW",
+ "TargetSettingsURI": null,
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import re
+import operator
+from urllib.error import HTTPError, URLError
+from copy import deepcopy
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import (
+ get_dynamic_uri, validate_and_get_first_resource_id_uri, xml_data_conversion, idrac_redfish_job_tracking, remove_key, get_idrac_firmware_version)
+
+
+SYSTEMS_URI = "/redfish/v1/Systems"
+iDRAC_JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{job_id}"
+CONTROLLER_NOT_EXIST_ERROR = "Specified Controller {controller_id} does not exist in the System."
+CONTROLLER_NOT_DEFINED = "Controller ID is required."
+SUCCESSFUL_OPERATION_MSG = "Successfully completed the {operation} storage volume operation."
+DRIVES_NOT_EXIST_ERROR = "No Drive(s) are attached to the specified Controller Id: {controller_id}."
+DRIVES_NOT_MATCHED = "Following Drive(s) {specified_drives} are not attached to the specified Controller Id: {controller_id}."
+NEGATIVE_OR_ZERO_MSG = "The value for the `{parameter}` parameter cannot be negative or zero."
+NEGATIVE_MSG = "The value for the `{parameter}` parameter cannot be negative."
+INVALID_VALUE_MSG = "The value for the `{parameter}` parameter is invalid."
+ID_AND_LOCATION_BOTH_DEFINED = "Either id or location is allowed."
+ID_AND_LOCATION_BOTH_NOT_DEFINED = "Either id or location should be specified."
+DRIVES_NOT_DEFINED = "Drives must be defined for volume creation."
+NOT_ENOUGH_DRIVES = "Sufficient number of disks not found in Controller '{controller_id}'."
+WAIT_TIMEOUT_MSG = "The job is not complete after {0} seconds."
+JOB_TRIGGERED = "Successfully triggered the {0} storage volume operation."
+VOLUME_NAME_REQUIRED_FOR_DELETE = "Virtual disk name is a required parameter for remove virtual disk operations."
+VOLUME_NOT_FOUND = "Unable to find the virtual disk."
+CHANGES_NOT_FOUND = "No changes found to commit!"
+CHANGES_FOUND = "Changes found to commit!"
+ODATA_ID = "@odata.id"
+ODATA_REGEX = "(.*?)@odata"
+ATTRIBUTE = "</Attribute>"
+VIEW_OPERATION_FAILED = "Failed to fetch storage details."
+VIEW_CONTROLLER_DETAILS_NOT_FOUND = "Failed to find the controller {controller_id}."
+VIEW_OPERATION_CONTROLLER_NOT_SPECIFIED = "Controller identifier parameter is missing."
+VIEW_VIRTUAL_DISK_DETAILS_NOT_FOUND = "Failed to find the volume: {volume_id} in controller: {controller_id}."
+SUCCESS_STATUS = "Success"
+FAILED_STATUS = "Failed"
+ERROR_CODES = ["SYS041", "SYS044", "SYS045", "SYS046", "SYS047", "SYS048", "SYS050", "SYS051", "SYS062",
+ "SYS063", "SYS064", "SYS065", "SYS067", "SYS068", "SYS070", "SYS071", "SYS072",
+ "SYS073", "SYS075", "SYS076", "SYS077", "SYS078", "SYS079", "SYS080"]
+
+
+class StorageBase:
+ def __init__(self, idrac, module):
+ self.module_ext_params = self.module_extend_input(module)
+ self.idrac = idrac
+ self.module = module
+
+ def data_conversion(self, module, each_volume):
+ volume_related_input = [
+ 'volume_type', 'span_length', 'span_depth',
+ 'number_dedicated_hot_spare', 'disk_cache_policy',
+ 'write_cache_policy', 'read_cache_policy', 'stripe_size',
+ 'capacity', 'raid_init_operation', 'protocol', 'media_type'
+ ]
+ for key in volume_related_input:
+ value = module.params.get(key)
+ if key not in each_volume:
+ each_volume[key] = value
+ return each_volume
+
+ def module_extend_input(self, module):
+ """
+ Extends the input module with additional volume-related parameters.
+
+ Args:
+ module (object): The module object.
+
+ Returns:
+            dict: The extended module parameters.
+ """
+
+ module_copy = deepcopy(module.params)
+ volumes = module_copy.get('volumes')
+ if volumes:
+ for index in range(len(volumes)):
+ volumes[index] = self.data_conversion(module, volumes[index])
+ else:
+ tmp_volume = self.data_conversion(module, {})
+ required_pd = int(module_copy.get('span_length', 1)) * int(module_copy.get('span_depth', 1))
+ tmp_volume['drives'] = {'id': [(-i) for i in range(1, required_pd + 1)]}
+ module_copy['volumes'] = [tmp_volume]
+
+ int_input = ['span_length', 'span_depth', 'number_dedicated_hot_spare',
+ 'stripe_size']
+ if volumes:
+ for each_volume in volumes:
+ for each_input in each_volume:
+ if each_input in int_input:
+ each_volume[each_input] = int(each_volume[each_input])
+ return module_copy
+
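+    # Illustrative sketch of the transformation above (the option values are
+    # assumptions): when no 'volumes' input is given and span_length=2 with
+    # span_depth=1, a single implicit volume is built with placeholder negative
+    # drive IDs for the required physical-disk count, e.g.
+    #   module_copy['volumes'] == [{'volume_type': 'RAID 0', ...,
+    #                               'drives': {'id': [-1, -2]}}]
+    # whereas an explicit 'volumes' list only inherits unset module-level defaults.
+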
+ def payload_for_disk(self, volume):
+ disk_payload = ''
+ if 'drives' in volume and 'id' in volume['drives']:
+ for each_pd_id in volume['drives']['id']:
+ scp = '<Attribute Name="IncludedPhysicalDiskID">{id}</Attribute>'.format(id=each_pd_id)
+ disk_payload = disk_payload + scp
+ if 'dedicated_hot_spare' in volume:
+ for each_dhs in volume['dedicated_hot_spare']:
+ scp = '<Attribute Name="RAIDdedicatedSpare">{id}</Attribute>'.format(id=each_dhs)
+ disk_payload = disk_payload + scp
+ return disk_payload
+
+ def construct_volume_payload(self, vd_id, volume):
+ """
+ Constructs a payload dictionary for the given key mappings.
+
+ Returns:
+ dict: The constructed payload dictionary.
+ """
+ key_mapping: dict = {
+ 'raid_init_operation': 'RAIDinitOperation',
+ 'state': "RAIDaction",
+ 'disk_cache_policy': "DiskCachePolicy",
+ 'write_cache_policy': "RAIDdefaultWritePolicy",
+ 'read_cache_policy': "RAIDdefaultReadPolicy",
+ 'stripe_size': "StripeSize",
+ 'span_depth': "SpanDepth",
+ 'span_length': "SpanLength",
+ 'volume_type': "RAIDTypes",
+ 'name': 'Name',
+ 'capacity': 'Size',
+ }
+ controller_id = self.module_ext_params.get("controller_id")
+ state = self.module_ext_params.get("state")
+ # Including state in each_volume as it mapped to RAIDaction
+ volume.update({'state': state.capitalize()})
+ payload = ''
+ attr = {}
+ vdfqdd = "Disk.Virtual.{0}:{1}".format(vd_id, controller_id)
+ for key in volume:
+ if volume[key] and key in key_mapping:
+ attr[key_mapping[key]] = volume[key]
+        disk_payload = self.payload_for_disk(volume)
+        payload = xml_data_conversion(attr, vdfqdd, disk_payload)
+ return payload
+
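+    # Illustrative sketch only: for a volume named 'volume_1' on RAID.Slot.1-1,
+    # the generated Server Configuration Profile fragment is expected to look
+    # roughly like the following (the exact attributes depend on the options set):
+    #   <Component FQDD="Disk.Virtual.0:RAID.Slot.1-1">
+    #     <Attribute Name="RAIDaction">Create</Attribute>
+    #     <Attribute Name="Name">volume_1</Attribute>
+    #     <Attribute Name="IncludedPhysicalDiskID">Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1</Attribute>
+    #   </Component>
+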
+ def constuct_payload(self, name_id_mapping):
+ number_of_existing_vd = len(name_id_mapping)
+ volume_payload, attr = '', {}
+ raid_reset_config_value = self.module_ext_params.get('raid_reset_config')
+ raid_key_mapping = {'raid_reset_config': 'RAIDresetConfig'}
+ if raid_reset_config_value == 'true':
+ raid_reset_config_value = 'True'
+ attr = {raid_key_mapping['raid_reset_config']: raid_reset_config_value}
+ for each_volume in self.module_ext_params.get('volumes'):
+ volume_payload = volume_payload + self.construct_volume_payload(number_of_existing_vd,
+ each_volume)
+ number_of_existing_vd = number_of_existing_vd + 1
+ raid_payload = xml_data_conversion(attr, self.module_ext_params.get('controller_id'), volume_payload)
+ return raid_payload
+
+ def wait_for_job_completion(self, job_resp):
+ job_wait = self.module_ext_params.get('job_wait')
+ job_wait_timeout = self.module_ext_params.get('job_wait_timeout')
+ job_dict = {}
+ if (job_tracking_uri := job_resp.headers.get("Location")):
+ job_id = job_tracking_uri.split("/")[-1]
+ job_uri = iDRAC_JOB_URI.format(job_id=job_id)
+ if job_wait:
+ job_failed, msg, job_dict, wait_time = idrac_redfish_job_tracking(self.idrac, job_uri,
+ max_job_wait_sec=job_wait_timeout,
+ sleep_interval_secs=1)
+ job_dict = remove_key(job_dict, regex_pattern=ODATA_REGEX)
+ if int(wait_time) >= int(job_wait_timeout):
+ self.module.exit_json(msg=WAIT_TIMEOUT_MSG.format(job_wait_timeout), changed=True, storage_status=job_dict)
+ if job_failed or job_dict.get("MessageId", "") in ERROR_CODES:
+ self.module.exit_json(msg=job_dict.get("Message"), storage_status=job_dict, failed=True)
+ else:
+ job_resp = self.idrac.invoke_request(job_uri, 'GET')
+ job_dict = job_resp.json_data
+ job_dict = remove_key(job_dict, regex_pattern=ODATA_REGEX)
+            self.module.exit_json(msg=JOB_TRIGGERED.format(self.module.params.get('state')), storage_status=job_dict, changed=True)
+ return job_dict
+
+
+class StorageData:
+ def __init__(self, idrac, module):
+ self.idrac = idrac
+ self.module = module
+
+ def fetch_controllers_uri(self):
+ uri, err_msg = validate_and_get_first_resource_id_uri(
+ self.module, self.idrac, SYSTEMS_URI)
+ if err_msg:
+ self.module.exit_json(msg=err_msg, failed=True)
+ storage_controllers = get_dynamic_uri(self.idrac, uri, 'Storage')
+ return storage_controllers
+
+ def fetch_api_data(self, uri, key_index_from_end):
+ key = uri.split("/")[key_index_from_end]
+ uri_data = self.idrac.invoke_request(uri, "GET")
+ return key, uri_data
+
+ def all_storage_data(self):
+ storage_info = {"Controllers": {}}
+ controllers_details_uri = self.fetch_controllers_uri()[ODATA_ID] + "?$expand=*($levels=1)"
+ controllers_list = get_dynamic_uri(self.idrac, controllers_details_uri)
+ for each_controller in controllers_list["Members"]:
+ controller_id = each_controller.get("Id")
+ if controller_id.startswith("CPU"):
+ continue
+ storage_info["Controllers"][controller_id] = deepcopy(each_controller)
+ storage_info["Controllers"][controller_id]["Drives"] = {}
+ storage_info["Controllers"][controller_id]["Volumes"] = {}
+ storage_info["Controllers"][controller_id]["Links"]["Enclosures"] = {}
+ # To fetch drives data
+ for each_drive_uri in each_controller["Drives"]:
+ key, uri_data = self.fetch_api_data(each_drive_uri[ODATA_ID], -1)
+ storage_info["Controllers"][controller_id]["Drives"][key] = uri_data.json_data
+
+ # To fetch volumes data
+ volume_uri = each_controller['Volumes'][ODATA_ID]
+ volumes_list = get_dynamic_uri(self.idrac, volume_uri, "Members")
+ for each_volume_uri in volumes_list:
+ key, uri_data = self.fetch_api_data(each_volume_uri[ODATA_ID], -1)
+ storage_info["Controllers"][controller_id]["Volumes"][key] = uri_data.json_data
+ # To fetch enclosures
+ for each_enclosure_uri in each_controller["Links"]["Enclosures"]:
+ key, uri_data = self.fetch_api_data(each_enclosure_uri[ODATA_ID], -1)
+ storage_info["Controllers"][controller_id]["Links"]["Enclosures"][key] = uri_data.json_data
+ return storage_info
+
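+    # Illustrative shape of the structure returned above (ellipses stand for the
+    # raw Redfish properties copied from each resource):
+    #   {"Controllers": {"RAID.Slot.1-1": {"Drives": {...}, "Volumes": {...},
+    #                                      "Links": {"Enclosures": {...}}, ...}}}
+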
+ def fetch_storage_data(self):
+ storage_info = {"Controller": {}}
+ storage_data = self.all_storage_data()
+ firm_ver = get_idrac_firmware_version(self.idrac)
+ for controller_id, controller_data in storage_data["Controllers"].items():
+ storage_info["Controller"][controller_id] = {
+ "ControllerSensor": {controller_id: {}}
+ }
+ if firm_ver >= "3.00":
+ battery_data = controller_data["Oem"]["Dell"].get("DellControllerBattery")
+ if battery_data:
+ storage_info["Controller"][controller_id]["ControllerSensor"][controller_id]["ControllerBattery"] = [battery_data["Id"]]
+ self.fetch_volumes(controller_id, controller_data, storage_info)
+ self.fetch_enclosures_and_physical_disk(controller_id, controller_data, storage_info)
+ return storage_info
+
+ def fetch_volumes(self, controller_id, controller_data, storage_info):
+ if controller_data["Volumes"]:
+ storage_info.setdefault("Controller", {}).setdefault(controller_id, {})["VirtualDisk"] = {}
+ for volume_id, volume_data in controller_data["Volumes"].items():
+ physical_disk = [self.fetch_api_data(drive[ODATA_ID], -1)[0] for drive in volume_data["Links"]["Drives"]]
+ storage_info["Controller"][controller_id]["VirtualDisk"][volume_id] = {"PhysicalDisk": physical_disk}
+
+ def fetch_enclosures_and_physical_disk(self, controller_id, controller_data, storage_info):
+ enclosures = [enclosure_id for enclosure_id in controller_data["Links"]["Enclosures"].keys() if enclosure_id.startswith("Enclosure")]
+ if len(enclosures) >= 1:
+ storage_info.setdefault("Controller", {})
+ storage_info["Controller"].setdefault(controller_id, {})
+ storage_info["Controller"][controller_id].setdefault("Enclosure", {})
+ for enclosure_id in enclosures:
+ storage_info["Controller"][controller_id]["Enclosure"][enclosure_id] = {"EnclosureSensor": {enclosure_id: {}}}
+ physical_disk = [self.fetch_api_data(drive[ODATA_ID], -1)[0] for drive in
+ controller_data["Links"]["Enclosures"][enclosure_id]["Links"]["Drives"]]
+ if physical_disk:
+ storage_info["Controller"][controller_id]["Enclosure"][enclosure_id]["PhysicalDisk"] = physical_disk
+ else:
+ if controller_data["Drives"].keys():
+ storage_info["Controller"][controller_id]["PhysicalDisk"] = list(controller_data["Drives"].keys())
+
+
+class StorageValidation(StorageBase):
+ def __init__(self, idrac, module):
+ super().__init__(idrac, module)
+ self.idrac_data = StorageData(idrac, module).all_storage_data()
+ self.controller_id = module.params.get("controller_id")
+
+ def validate_controller_exists(self):
+ if not self.controller_id:
+ self.module.exit_json(msg=CONTROLLER_NOT_DEFINED, failed=True)
+ controllers = self.idrac_data["Controllers"]
+ if self.controller_id not in controllers.keys():
+ self.module.exit_json(msg=CONTROLLER_NOT_EXIST_ERROR.format(controller_id=self.controller_id), failed=True)
+
+ def validate_job_wait_negative_values(self):
+ if self.module_ext_params.get("job_wait") and self.module_ext_params.get("job_wait_timeout") <= 0:
+ self.module.exit_json(msg=NEGATIVE_OR_ZERO_MSG.format(parameter="job_wait_timeout"), failed=True)
+
+ def validate_negative_values_for_volume_params(self, each_volume):
+        inner_params = ["span_depth", "span_length", "capacity", "stripe_size"]
+ for param in inner_params:
+ value = each_volume.get(param)
+ if value is not None and value <= 0:
+ self.module.exit_json(msg=NEGATIVE_OR_ZERO_MSG.format(parameter=param), failed=True)
+ if each_volume.get("number_dedicated_hot_spare") < 0:
+ self.module.exit_json(msg=NEGATIVE_MSG.format(parameter="number_dedicated_hot_spare"), failed=True)
+
+ def validate_volume_drives(self, specified_volume):
+ specified_drives = specified_volume.get("drives")
+ if not specified_drives:
+ self.module.exit_json(msg=DRIVES_NOT_DEFINED, failed=True)
+ if specified_drives.get("id") and specified_drives.get("location"):
+ self.module.exit_json(msg=ID_AND_LOCATION_BOTH_DEFINED, failed=True)
+ elif "id" not in specified_drives and "location" not in specified_drives:
+ self.module.exit_json(msg=ID_AND_LOCATION_BOTH_NOT_DEFINED, failed=True)
+ drives_count = len(specified_drives.get("location")) if specified_drives.get("location") is not None else len(specified_drives.get("id"))
+ return self.raid_std_validation(specified_volume.get("span_length"),
+ specified_volume.get("span_depth"),
+ specified_volume.get("volume_type"),
+ drives_count)
+
+ def raid_std_validation(self, span_length, span_depth, volume_type, pd_count):
+ raid_std = {
+ "RAID 0": {'pd_slots': range(1, 2), 'span_length': 1, 'checks': operator.ge, 'span_depth': 1},
+ "RAID 1": {'pd_slots': range(1, 3), 'span_length': 2, 'checks': operator.eq, 'span_depth': 1},
+ "RAID 5": {'pd_slots': range(1, 4), 'span_length': 3, 'checks': operator.ge, 'span_depth': 1},
+ "RAID 6": {'pd_slots': range(1, 5), 'span_length': 4, 'checks': operator.ge, 'span_depth': 1},
+ "RAID 10": {'pd_slots': range(1, 5), 'span_length': 2, 'checks': operator.ge, 'span_depth': 2},
+ "RAID 50": {'pd_slots': range(1, 7), 'span_length': 3, 'checks': operator.ge, 'span_depth': 2},
+ "RAID 60": {'pd_slots': range(1, 9), 'span_length': 4, 'checks': operator.ge, 'span_depth': 2}
+ }
+ raid_info = raid_std.get(volume_type)
+ if not raid_info.get('checks')(span_length, raid_info.get('span_length')):
+ self.module.exit_json(msg=INVALID_VALUE_MSG.format(parameter="span_length"), failed=True)
+ if volume_type in ["RAID 0", "RAID 1", "RAID 5", "RAID 6"] and operator.ne(span_depth, raid_info.get('span_depth')):
+ self.module.exit_json(msg=INVALID_VALUE_MSG.format(parameter="span_depth"), failed=True)
+ if volume_type in ["RAID 10", "RAID 50", "RAID 60"] and operator.lt(span_depth, raid_info.get('span_depth')):
+ self.module.exit_json(msg=INVALID_VALUE_MSG.format(parameter="span_depth"), failed=True)
+ if not operator.eq(pd_count, span_depth * span_length):
+ self.module.exit_json(msg=INVALID_VALUE_MSG.format(parameter="drives"), failed=True)
+ return True
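To make the table above concrete, a worked example under assumed inputs (a hypothetical RAID 10 request):

import operator

# RAID 10 row: span_length is checked with operator.ge against 2,
# span_depth must be at least 2, and drives must equal depth * length.
span_length, span_depth, pd_count = 2, 2, 4
operator.ge(span_length, 2)                      # True  -> span_length accepted
operator.lt(span_depth, 2)                       # False -> span_depth accepted
operator.eq(pd_count, span_depth * span_length)  # eq(4, 4) -> True, drive count accepted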
+
+
+class StorageCreate(StorageValidation):
+ def disk_slot_location_to_id_conversion(self, each_volume):
+ drives = {}
+ if "location" in each_volume['drives']:
+ regex_pattern = r"\d+"
+ physical_disk = self.idrac_data["Controllers"][self.controller_id]["Drives"]
+ slot_id_mapping = {int(re.search(regex_pattern, key).group()): key for key in physical_disk.keys()}
+ drives['id'] = [slot_id_mapping.get(each_pd) for each_pd in each_volume['drives']['location']
+ if slot_id_mapping.get(each_pd)]
+ elif "id" in each_volume['drives']:
+ drives['id'] = each_volume['drives']['id']
+ return drives
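A minimal sketch of the slot-to-ID mapping built above, assuming typical iDRAC drive FQDDs (the keys here are hypothetical):

import re

physical_disk = {
    "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1": {},
    "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1": {},
}
# The first integer found in each key is treated as the bay/slot number.
slot_id_mapping = {int(re.search(r"\d+", key).group()): key for key in physical_disk}
slot_id_mapping.get(1)  # 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1'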
+
+ def perform_intersection_on_disk(self, each_volume, healthy_disk, available_disk,
+ media_type_supported_disk, protocol_supported_disk):
+ filtered_disk = healthy_disk
+ firm_ver = get_idrac_firmware_version(self.idrac)
+ if firm_ver >= "3.00":
+ filtered_disk = filtered_disk.intersection(available_disk)
+ if filtered_disk and each_volume.get('media_type'):
+ filtered_disk = filtered_disk.intersection(media_type_supported_disk)
+ if filtered_disk and each_volume.get('protocol'):
+ filtered_disk = filtered_disk.intersection(protocol_supported_disk)
+ return sorted(list(filtered_disk))
+
+ def filter_disk(self, each_volume):
+ disk_dict = self.idrac_data["Controllers"][self.controller_id]["Drives"]
+ healthy_disk = set()
+ available_disk = set()
+ media_type_supported_disk = set()
+ protocol_supported_disk = set()
+ raid_reset_config_value = self.module_ext_params.get('raid_reset_config')
+ raid_status_list = ["Ready", "NonRAID"]
+ if raid_reset_config_value == "true":
+ raid_status_list.append("Online")
+ for key, value in disk_dict.items():
+ if each_volume.get('media_type') and value.get('MediaType') == each_volume.get('media_type'):
+ media_type_supported_disk.add(key)
+ if each_volume.get('protocol') and value.get('Protocol') == each_volume.get('protocol'):
+ protocol_supported_disk.add(key)
+ status = value.get('Status', {}).get('Health', {})
+ if status == "OK":
+ healthy_disk.add(key)
+ raid_status = value.get('Oem', {}).get('Dell', {}).get('DellPhysicalDisk', {}).get('RaidStatus', {})
+ if raid_status in raid_status_list:
+ available_disk.add(key)
+ return self.perform_intersection_on_disk(each_volume, healthy_disk, available_disk,
+ media_type_supported_disk, protocol_supported_disk)
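A small sketch of the intersection order applied above, with hypothetical disk IDs:

healthy_disk = {"d1", "d2", "d3"}            # Status.Health == "OK"
available_disk = {"d1", "d2"}                # RaidStatus in ["Ready", "NonRAID"]
media_type_supported_disk = {"d2", "d3"}     # MediaType matches the request
filtered = healthy_disk & available_disk & media_type_supported_disk
sorted(filtered)                             # ['d2'] -> candidate pool for the volume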
+
+ def updating_drives_module_input_when_given(self, each_volume, filter_disk_output):
+ updated_disk_id_list = []
+ if 'id' in each_volume['drives']:
+ for each_pd in each_volume['drives']['id']:
+ if each_pd in filter_disk_output:
+ updated_disk_id_list.append(each_pd)
+ return updated_disk_id_list
+
+ def updating_volume_module_input_for_hotspare(self, each_volume, filter_disk_output, reserved_pd, drives_exists_in_id):
+ tmp_list = []
+ if 'number_dedicated_hot_spare' in each_volume and each_volume['number_dedicated_hot_spare'] > 0:
+ for each_pd in filter_disk_output:
+ if each_pd not in reserved_pd and each_pd not in drives_exists_in_id:
+ tmp_list.append(each_pd)
+ if len(tmp_list) == each_volume['number_dedicated_hot_spare']:
+ break
+ return tmp_list
+
+ def updating_volume_module_input(self, drives_exists_in_id):
+ volumes = self.module_ext_params.get('volumes', [])
+ reserved_pd = []
+ for each in volumes:
+ required_pd = int(each['span_depth']) * int(each['span_length'])
+ filtered_disk = self.filter_disk(each)
+ if 'stripe_size' in each:
+ each['stripe_size'] = int(each['stripe_size'] / 512)
+
+ if each.get('capacity') is not None:
+ each['capacity'] = str(int(float(each['capacity']) * 1073741824))
+
+ if self.module.params.get('volumes') is None:
+ each['drives']['id'] = filtered_disk[:required_pd]
+
+ if 'drives' in each:
+ drives_id_list = self.updating_drives_module_input_when_given(each, filtered_disk)
+ reserved_pd += drives_id_list
+ each['drives']['id'] = drives_id_list
+
+ if 'number_dedicated_hot_spare' in each:
+ hotspare_disk_list = self.updating_volume_module_input_for_hotspare(each, filtered_disk, reserved_pd,
+ drives_exists_in_id)
+ reserved_pd += hotspare_disk_list
+ each['dedicated_hot_spare'] = hotspare_disk_list
+ self.validate_enough_drives_available(each)
+ if self.module.check_mode:
+ self.module.exit_json(msg=CHANGES_FOUND, changed=True)
+ self.module_ext_params['volumes'] = volumes
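The two unit conversions above, spelled out with illustrative numbers (the SCP payload expects the stripe size in 512-byte sectors and the capacity in bytes; 1073741824 is 2**30):

stripe_size = 64 * 1024                               # module default, in bytes
stripe_size_sectors = int(stripe_size / 512)          # 128 sectors
capacity = 100.0                                      # module input
capacity_bytes = str(int(capacity * 1073741824))      # '107374182400'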
+
+ def validate_enough_drives_available(self, each_volume):
+ controller_id = self.module_ext_params.get('controller_id')
+ required_pd = each_volume['span_depth'] * each_volume['span_length']
+ drives_available = each_volume['drives']['id']
+ dedicated_hot_spare_required = int(each_volume['number_dedicated_hot_spare'])
+ dedicated_hot_spare_available = len(each_volume['dedicated_hot_spare'])
+ changed, failed = False, False
+ if (required_pd > len(drives_available) or dedicated_hot_spare_required != dedicated_hot_spare_available):
+ if not self.module.check_mode:
+ msg, failed = NOT_ENOUGH_DRIVES.format(controller_id=controller_id), True
+ else:
+ msg, changed = CHANGES_NOT_FOUND, False
+ self.module.exit_json(msg=msg, changed=changed, failed=failed)
+
+ def validate(self):
+ # Validate upper layer input
+ self.validate_controller_exists()
+ self.validate_job_wait_negative_values()
+        # Standard RAID validation for each volume (inner layer)
+ drives_exists_in_id = []
+ for each_volume in self.module_ext_params.get('volumes', []):
+            # Validating for negative values
+ self.validate_negative_values_for_volume_params(each_volume)
+ self.validate_volume_drives(each_volume)
+ if 'location' in each_volume['drives'] and each_volume['drives']['location']:
+ each_volume['drives'] = self.disk_slot_location_to_id_conversion(each_volume)
+ drives_exists_in_id += each_volume['drives']['id']
+        # Extending the volume module input in module_ext_params with drive IDs and hot spares
+ self.updating_volume_module_input(drives_exists_in_id)
+
+ def execute(self):
+ self.validate()
+ job_dict = {}
+ name_id_mapping = {value.get('Name'): key for key, value in self.idrac_data["Controllers"][self.controller_id]["Volumes"].items()}
+ parent_payload = """<SystemConfiguration>{0}</SystemConfiguration>"""
+ payload = self.constuct_payload(name_id_mapping)
+ parent_payload = parent_payload.format(payload)
+ resp = self.idrac.import_scp(import_buffer=parent_payload, target="RAID", job_wait=False)
+ job_dict = self.wait_for_job_completion(resp)
+ return job_dict
+
+
+class StorageDelete(StorageValidation):
+ def check_even_single_given_volume_exists(self, volume_name_input_list):
+ for each_name in volume_name_input_list:
+ for cntrl_id, detail in self.idrac_data.get('Controllers').items():
+ for vol_id, volume in detail.get('Volumes').items():
+ if each_name == volume.get('Name'):
+ return True
+
+ def validate_volume_exists_in_server(self, volume_name_input_list):
+ changed, failed = False, False
+ single_volume_name_matched = self.check_even_single_given_volume_exists(volume_name_input_list)
+ if single_volume_name_matched:
+ if self.module.check_mode:
+ msg, changed = CHANGES_FOUND, True
+ else:
+ return
+ else:
+ msg, failed = VOLUME_NOT_FOUND, True
+ self.module.exit_json(msg=msg, failed=failed, changed=changed)
+
+ def validate(self):
+ # Validate upper layer input
+ self.validate_job_wait_negative_values()
+
+        # Validate that volumes are provided and each volume has a name
+ if (not (volumes := self.module.params.get('volumes'))) or (volumes and not all("name" in each for each in volumes)):
+ self.module.exit_json(msg=VOLUME_NAME_REQUIRED_FOR_DELETE, failed=True)
+
+ def construct_payload_for_delete(self, cntrl_id_vd_id_mapping):
+ parent_payload = """<SystemConfiguration>{0}</SystemConfiguration>"""
+ raid_payload = ""
+ for each_controller, value in cntrl_id_vd_id_mapping.items():
+ volume_payload = ""
+ for each_value in value:
+ volume_payload += xml_data_conversion({'RAIDaction': 'Delete'}, each_value)
+ raid_payload += xml_data_conversion({}, each_controller, volume_payload)
+ parent_payload = parent_payload.format(raid_payload)
+ return parent_payload
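Assuming xml_data_conversion (a helper imported from this collection's utils) emits standard SCP Component/Attribute elements, the generated delete buffer looks roughly like the string below; the FQDDs are hypothetical:

# Rough shape of the SCP delete buffer; FQDDs are made up.
payload = """<SystemConfiguration>
  <Component FQDD="RAID.Integrated.1-1">
    <Component FQDD="Disk.Virtual.0:RAID.Integrated.1-1">
      <Attribute Name="RAIDaction">Delete</Attribute>
    </Component>
  </Component>
</SystemConfiguration>"""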
+
+ def get_vd_id_based_on_controller_id_vd_name(self, user_volume_input_list):
+ cntrl_id_vd_id_mapping = {}
+ for cntrl_id, detail in self.idrac_data.get('Controllers').items():
+ for vd_id, volume in detail.get('Volumes').items():
+ if volume.get('Name') in user_volume_input_list:
+ if cntrl_id not in cntrl_id_vd_id_mapping:
+ cntrl_id_vd_id_mapping[cntrl_id] = [vd_id]
+ else:
+ cntrl_id_vd_id_mapping[cntrl_id].append(vd_id)
+ return cntrl_id_vd_id_mapping
+
+ def execute(self):
+ self.validate()
+ job_dict = {}
+ volume_name_input_list = [each.get('name') for each in self.module.params.get('volumes')]
+ self.validate_volume_exists_in_server(set(volume_name_input_list))
+ cntrl_id_vd_id_mapping = self.get_vd_id_based_on_controller_id_vd_name(set(volume_name_input_list))
+ payload = self.construct_payload_for_delete(cntrl_id_vd_id_mapping)
+ resp = self.idrac.import_scp(import_buffer=payload, target="RAID", job_wait=False)
+ job_dict = self.wait_for_job_completion(resp)
+ return job_dict
+
+
+class StorageView(StorageData):
+ def __init__(self, idrac, module):
+ super().__init__(idrac, module)
+
+ def execute(self):
+ status = SUCCESS_STATUS
+ storage_data = self.fetch_storage_data()
+ controller_id = self.module.params.get("controller_id")
+ volume_id = self.module.params.get("volume_id")
+ if volume_id:
+ status, storage_data = self.process_volume_id(volume_id, controller_id, storage_data)
+ elif controller_id:
+ status, storage_data = self.process_controller_id(controller_id, storage_data)
+ return {"Message": storage_data, "Status": status}
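An illustrative return value of StorageView.execute() when filtered by a (hypothetical) controller ID, assuming SUCCESS_STATUS is the string "Success":

view_output = {
    "Message": {"RAID.Integrated.1-1": {"ControllerSensor": {"RAID.Integrated.1-1": {}}}},
    "Status": "Success",
}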
+
+ def process_volume_id(self, volume_id, controller_id, storage_data):
+ status = SUCCESS_STATUS
+ if controller_id:
+ ctrl_data = storage_data["Controller"].get(controller_id)
+ if ctrl_data:
+ virtual_disk = ctrl_data.get("VirtualDisk")
+ if not virtual_disk or volume_id not in virtual_disk:
+ status = FAILED_STATUS
+ message = VIEW_VIRTUAL_DISK_DETAILS_NOT_FOUND.format(volume_id=volume_id, controller_id=controller_id)
+ self.module.exit_json(msg=VIEW_OPERATION_FAILED,
+ storage_status={"Message": message, "Status": status},
+ failed=True)
+ else:
+ storage_data[controller_id] = {"VirtualDisk": ctrl_data["VirtualDisk"]}
+ del storage_data["Controller"]
+ else:
+ status = FAILED_STATUS
+ message = VIEW_CONTROLLER_DETAILS_NOT_FOUND.format(controller_id=controller_id)
+ self.module.exit_json(msg=VIEW_OPERATION_FAILED,
+ storage_status={"Message": message, "Status": status},
+ failed=True)
+ else:
+ status = FAILED_STATUS
+ message = VIEW_OPERATION_CONTROLLER_NOT_SPECIFIED
+ self.module.exit_json(msg=VIEW_OPERATION_FAILED,
+ storage_status={"Message": message, "Status": status},
+ failed=True)
+ return status, storage_data
+
+ def process_controller_id(self, controller_id, storage_data):
+ status = SUCCESS_STATUS
+ ctrl_data = storage_data["Controller"].get(controller_id)
+ if ctrl_data:
+ storage_data[controller_id] = ctrl_data
+ del storage_data["Controller"]
+ else:
+ status = FAILED_STATUS
+ message = VIEW_CONTROLLER_DETAILS_NOT_FOUND.format(controller_id=controller_id)
+ self.module.exit_json(msg=VIEW_OPERATION_FAILED,
+ storage_status={"Message": message, "Status": status},
+ failed=True)
+ return status, storage_data
+
+
+def main():
+ specs = {
+ "state": {"choices": ['create', 'delete', 'view'], "default": 'view'},
+ "volume_id": {"type": 'str'},
+ "volumes": {"type": 'list', "elements": 'dict'},
+ "span_depth": {"type": 'int', "default": 1},
+ "span_length": {"type": 'int', "default": 1},
+ "number_dedicated_hot_spare": {"type": 'int', "default": 0},
+ "volume_type": {"choices": ['RAID 0', 'RAID 1', 'RAID 5', 'RAID 6', 'RAID 10', 'RAID 50', 'RAID 60'],
+ "default": 'RAID 0'},
+ "disk_cache_policy": {"choices": ["Default", "Enabled", "Disabled"],
+ "default": "Default"},
+ "write_cache_policy": {"choices": ["WriteThrough", "WriteBack", "WriteBackForce"],
+ "default": "WriteThrough"},
+ "read_cache_policy": {"choices": ["NoReadAhead", "ReadAhead", "AdaptiveReadAhead"],
+ "default": "NoReadAhead"},
+ "stripe_size": {"type": 'int', "default": 64 * 1024},
+ "capacity": {"type": 'float'},
+ "controller_id": {"type": 'str'},
+ "media_type": {"choices": ['HDD', 'SSD']},
+ "protocol": {"choices": ['SAS', 'SATA', 'PCIE']},
+ "raid_reset_config": {"choices": ['true', 'false'], "default": 'false'},
+ "raid_init_operation": {"choices": ['None', 'Fast']},
+ "job_wait": {"type": "bool", "default": True},
+ "job_wait_timeout": {"type": "int", "default": 900}
+ }
+ specs.update(idrac_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ supports_check_mode=True)
+ try:
+ with iDRACRedfishAPI(module.params) as idrac:
+ changed = False
+ state_class_mapping = {
+ 'create': StorageCreate,
+ 'view': StorageView,
+ 'delete': StorageDelete,
+ }
+ state_type = state_class_mapping.get(module.params['state'])
+ obj = state_type(idrac, module)
+ output = obj.execute()
+ msg = SUCCESSFUL_OPERATION_MSG.format(operation=module.params['state'])
+            changed = module.params['state'] in ['create', 'delete']
+ module.exit_json(msg=msg, changed=changed, storage_status=output)
+ except HTTPError as err:
+ import json
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError) as err:
+ module.exit_json(msg=str(err), failed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py
index 65b1ae271..760546f02 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py
@@ -3,8 +3,8 @@
#
# Dell OpenManage Ansible Modules
-# Version 7.0.0
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 9.1.0
+# Copyright (C) 2022-2024 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -130,12 +130,13 @@ options:
description: The frequency of the PowerManager extension data maintenance and purging.
type: int
requirements:
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
notes:
- This module supports C(check_mode).
author:
- Sachin Apagundi(@sachin-apa)
- Husniya Hameed (@husniya-hameed)
+ - ShivamSh3 (@ShivamSh3)
'''
EXAMPLES = r'''
@@ -656,13 +657,13 @@ def main():
for cp in resp_data:
cp_data = strip_substr_dict(cp)
cp_list.append(cp_data)
- module.exit_json(msg=SUCCESS_MSG, console_preferences=cp_list)
+ module.exit_json(msg=SUCCESS_MSG, console_preferences=cp_list, changed=True)
except HTTPError as err:
- module.fail_json(msg=str(err), error_info=json.load(err))
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
- module.fail_json(msg=str(err), error_info=json.load(err))
+ module.exit_json(msg=str(err), failed=True)
if __name__ == '__main__':
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py
index 7de50f0fb..c6245a6de 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py
@@ -3,8 +3,8 @@
#
# Dell OpenManage Ansible Modules
-# Version 8.1.0
-# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 9.1.0
+# Copyright (C) 2022-2024 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -130,7 +130,7 @@ options:
- ja to set Japanese language.
- zh to set Chinese language.
requirements:
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Shivam Sharma (@ShivamSh3)"
@@ -221,9 +221,9 @@ location_details:
"LcdCustomString": "LCD Text",
"LcdLanguage": "en",
"LcdOverridePin": "",
- "LcdPinLength": null,
+ "LcdPinLength": 6,
"LcdPresence": "Present",
- "LedPresence": null,
+ "LedPresence": "Absent",
"QuickSync": {
"EnableInactivityTimeout": true,
"EnableQuickSyncWifi": false,
@@ -380,6 +380,8 @@ def check_mode_validation(module, loc_resp):
payload["QuickSync"]["QuickSyncHardware"] = loc_resp["QuickSync"]["QuickSyncHardware"]
payload["SettingType"] = "LocalAccessConfiguration"
payload["LcdPresence"] = loc_resp["LcdPresence"]
+ payload["LcdPinLength"] = loc_resp["LcdPinLength"]
+ payload["LedPresence"] = loc_resp["LedPresence"]
return payload
@@ -476,7 +478,7 @@ def main():
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
- module.fail_json(msg=str(err))
+ module.exit_json(msg=str(err), failed=True)
if __name__ == '__main__':
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py
index f12cf7078..7d45bf7e9 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py
@@ -3,7 +3,7 @@
#
# Dell OpenManage Ansible Modules
-# Version 8.7.0
+# Version 9.1.0
# Copyright (C) 2022-2024 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -127,6 +127,7 @@ requirements:
author:
- "Felix Stephen (@felixs88)"
- "Shivam Sharma (@ShivamSh3)"
+ - "Kritika Bhateja (@Kritika-Bhateja-03)"
notes:
- Run this module from a system that has direct access to OpenManage Enterprise Modular.
- This module supports C(check_mode).
@@ -395,7 +396,7 @@ def ip_address_field(module, field, deploy_options, slot=False):
if field_value is not None:
valid = validate_ip_address(module_params.get(val[0]), val[1])
if valid is False:
- module.fail_json(msg=IP_FAIL_MSG.format(field_value, val[0]))
+ module.exit_json(msg=IP_FAIL_MSG.format(field_value, val[0]), failed=True)
def check_domain_service(module, rest_obj):
@@ -404,7 +405,7 @@ def check_domain_service(module, rest_obj):
except HTTPError as err:
err_message = json.load(err)
if err_message["error"]["@Message.ExtendedInfo"][0]["MessageId"] == "CGEN1006":
- module.fail_json(msg=DOMAIN_FAIL_MSG)
+ module.exit_json(msg=DOMAIN_FAIL_MSG, failed=True)
def get_ip_from_host(hostname):
@@ -431,7 +432,7 @@ def get_chassis_device(module, rest_obj):
key, value = ("Id", data["DeviceId"])
break
else:
- module.fail_json(msg=FETCH_FAIL_MSG)
+ module.exit_json(msg=FETCH_FAIL_MSG, failed=True)
return key, value
@@ -469,12 +470,10 @@ def check_mode_validation(module, deploy_data):
"SlotIPV6Address": each.get("slot_ipv6_address"), "VlanId": each.get("vlan_id")}
if each.get("vlan_id") is not None:
req_slot_1.update({"VlanId": str(each.get("vlan_id"))})
- else:
- req_slot_1.update({"VlanId": ""})
req_filter_slot = dict([(k, v) for k, v in req_slot_1.items() if v is not None])
- exist_slot_1 = {"SlotId": exist_filter_slot[0]["SlotId"],
- "SlotIPV4Address": exist_filter_slot[0]["SlotIPV4Address"],
- "SlotIPV6Address": exist_filter_slot[0]["SlotIPV6Address"]}
+ exist_slot_1 = {"SlotId": exist_filter_slot[0].get("SlotId"),
+ "SlotIPV4Address": exist_filter_slot[0].get("SlotIPV4Address"),
+ "SlotIPV6Address": exist_filter_slot[0].get("SlotIPV6Address")}
if "VlanId" in exist_filter_slot[0]:
exist_slot_1.update({"VlanId": exist_filter_slot[0]["VlanId"]})
else:
@@ -487,7 +486,7 @@ def check_mode_validation(module, deploy_data):
else:
invalid_slot.append(each["slot_id"])
if invalid_slot:
- module.fail_json(msg=INVALID_SLOT_MSG.format(", ".join(map(str, invalid_slot))))
+ module.exit_json(msg=INVALID_SLOT_MSG.format(", ".join(map(str, invalid_slot))), failed=True)
if module.check_mode and any(diff_changes) is True:
module.exit_json(msg=CHANGES_FOUND, changed=True, quick_deploy_settings=deploy_data)
elif (module.check_mode and any(diff_changes) is False) or \
@@ -597,23 +596,25 @@ def get_device_details(rest_obj, module):
resp_data = resp.json_data.get("value")
rename_key = "id" if key == "Id" else "service tag"
if not resp_data:
- module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value))
+ module.exit_json(msg=DEVICE_FAIL_MSG.format(rename_key, value), failed=True)
if key == "DeviceServiceTag" and resp_data[0]["DeviceServiceTag"] == tag:
device_id = resp_data[0]["Id"]
elif key == "Id" and resp_data[0]["Id"] == device_id:
device_id = resp_data[0]["Id"]
else:
- module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value))
+ module.exit_json(msg=DEVICE_FAIL_MSG.format(rename_key, value), failed=True)
settings_type, settings_key = "IOMQuickDeploy", "IOM Quick Deploy"
if module.params["setting_type"] == "ServerQuickDeploy":
settings_type, settings_key = "ServerQuickDeploy", "Server Quick Deploy"
try:
deploy_resp = rest_obj.invoke_request("GET", QUICK_DEPLOY_API.format(device_id, settings_type))
except HTTPError as err:
- err_message = json.load(err)
- error_msg = err_message.get('error', {}).get('@Message.ExtendedInfo')
+ if err.status == 404:
+ module.exit_json(msg=DEVICE_FAIL_MSG.format(rename_key, value), failed=True)
+ err_message = json.load(err).get("error")
+ error_msg = err_message.get('@Message.ExtendedInfo')
if error_msg and error_msg[0].get("MessageId") == "CGEN1004":
- module.fail_json(msg=QUICK_DEPLOY_FAIL_MSG.format(settings_key))
+ module.exit_json(msg=QUICK_DEPLOY_FAIL_MSG.format(settings_key), failed=True)
else:
resp_data = rest_obj.strip_substr_dict(deploy_resp.json_data)
payload, slot_payload = check_mode_validation(module, resp_data)
@@ -621,7 +622,7 @@ def get_device_details(rest_obj, module):
if module.params["job_wait"]:
job_failed, job_msg = rest_obj.job_tracking(job_id, job_wait_sec=module.params["job_wait_timeout"])
if job_failed is True:
- module.fail_json(msg=FAIL_MSG)
+ module.exit_json(msg=FAIL_MSG, failed=True)
job_success_resp = rest_obj.invoke_request("GET", QUICK_DEPLOY_API.format(device_id, settings_type))
job_success_data = rest_obj.strip_substr_dict(job_success_resp.json_data)
return job_id, job_success_data
@@ -667,7 +668,7 @@ def main():
mutually_exclusive=[('device_id', 'device_service_tag')],
supports_check_mode=True,)
if module.params["quick_deploy_options"] is None:
- module.fail_json(msg="missing required arguments: quick_deploy_options")
+ module.exit_json(msg="missing required arguments: quick_deploy_options", failed=True)
fields = [("ipv4_subnet_mask", "IPV4"), ("ipv4_gateway", "IPV4"), ("ipv6_gateway", "IPV6")]
ip_address_field(module, fields, module.params["quick_deploy_options"], slot=False)
slot_options = module.params["quick_deploy_options"].get("slots")
@@ -683,12 +684,12 @@ def main():
module.exit_json(msg=SUCCESS_MSG, job_id=job_id, quick_deploy_settings=data, changed=True)
module.exit_json(msg=JOB_MSG, job_id=job_id)
except HTTPError as err:
- module.fail_json(msg=str(err), error_info=json.load(err))
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
except (IOError, ValueError, SSLError, TypeError, ConnectionError,
AttributeError, IndexError, KeyError, OSError) as err:
- module.fail_json(msg=str(err))
+ module.exit_json(msg=str(err), failed=True)
if __name__ == '__main__':
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py
index 876e5b235..2dc7b625d 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py
@@ -3,8 +3,8 @@
#
# Dell OpenManage Ansible Modules
-# Version 6.1.0
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 9.1.0
+# Copyright (C) 2022-2024 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -19,8 +19,6 @@ module: ome_devices
short_description: Perform device-specific operations on target devices
description: Perform device-specific operations such as refresh inventory, clear iDRAC job queue, and reset iDRAC from OpenManage Enterprise.
version_added: 6.1.0
-author:
- - Jagadeesh N V(@jagadeeshnv)
extends_documentation_fragment:
- dellemc.openmanage.oment_auth_options
options:
@@ -77,7 +75,10 @@ options:
description: Optional description for the job.
type: str
requirements:
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
+author:
+ - Jagadeesh N V(@jagadeeshnv)
+ - ShivamSh3(@ShivamSh3)
notes:
- For C(idrac_reset), the job triggers only the iDRAC reset operation and does not track the complete reset cycle.
- Run this module from a system that has direct access to Dell OpenManage Enterprise.
@@ -248,6 +249,7 @@ JOB_DESC = "The {0} task initiated from OpenManage Ansible Modules for devices w
APPLY_TRIGGERED = "Successfully initiated the device action job."
JOB_SCHEDULED = "The job is scheduled successfully."
SUCCESS_MSG = "The device operation is performed successfully."
+TIMEOUT_NEGATIVE_MSG = "The parameter `job_wait_timeout` value cannot be negative or zero."
all_device_types = [1000, 2000, 4000, 5000, 7000, 8000, 9001]
device_type_map = {"refresh_inventory": all_device_types, "reset_idrac": [1000], "clear_idrac_job_queue": [1000]}
@@ -419,6 +421,8 @@ def main():
supports_check_mode=True
)
try:
+ if module.params.get("job_wait") and module.params.get("job_wait_timeout") <= 0:
+ module.exit_json(msg=TIMEOUT_NEGATIVE_MSG, failed=True)
with RestOME(module.params, req_session=True) as rest_obj:
if module.params.get("state") == 'present':
valids, invalids = get_dev_ids(module, rest_obj,
@@ -432,12 +436,12 @@ def main():
module.exit_json(msg=NO_CHANGES_MSG)
delete_devices(module, rest_obj, valids)
except HTTPError as err:
- module.fail_json(msg=str(err), error_info=json.load(err))
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError,
OSError) as err:
- module.fail_json(msg=str(err))
+ module.exit_json(msg=str(err), failed=True)
if __name__ == '__main__':
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py
index d8f0c5503..3de200a8f 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py
@@ -3,8 +3,8 @@
#
# Dell OpenManage Ansible Modules
-# Version 8.5.0
-# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 9.1.0
+# Copyright (C) 2019-2024 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -182,11 +182,13 @@ requirements:
author:
- "Sajna Shetty(@Sajna-Shetty)"
- "Kritika Bhateja(@Kritika-Bhateja-03)"
+ - "Shivam Sharma(@ShivamSh3)"
notes:
- Run this module from a system that has direct access to Redfish APIs.
- This module supports C(check_mode).
- This module always reports changes when I(name) and I(volume_id) are not specified.
Either I(name) or I(volume_id) is required to support C(check_mode).
+    - This module does not support the create operation of RAID6 and RAID60 storage volumes on iDRAC8.
- This module supports IPv4 and IPv6 addresses.
'''
@@ -374,6 +376,7 @@ import copy
from ssl import SSLError
from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params
from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.compat.version import LooseVersion
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import MANAGER_JOB_ID_URI, wait_for_redfish_reboot_job, \
@@ -401,6 +404,9 @@ REBOOT_FAIL = "Failed to reboot the server."
CONTROLLER_NOT_EXIST_ERROR = "Specified Controller {controller_id} does not exist in the System."
TIMEOUT_NEGATIVE_OR_ZERO_MSG = "The parameter job_wait_timeout value cannot be negative or zero."
SYSTEM_ID = "System.Embedded.1"
+GET_IDRAC_FIRMWARE_VER_URI = "/redfish/v1/Managers/iDRAC.Embedded.1?$select=FirmwareVersion"
+ODATA_ID = "@odata.id"
+TARGET_OUT_OF_BAND = "Target out-of-band controller does not support storage feature using Redfish API."
volume_type_map = {"NonRedundant": "RAID0",
"Mirrored": "RAID1",
"StripedWithParity": "RAID5",
@@ -414,26 +420,26 @@ def fetch_storage_resource(module, session_obj):
system_resp = session_obj.invoke_request("GET", system_uri)
system_members = system_resp.json_data.get("Members")
if system_members:
- system_id_res = system_members[0]["@odata.id"]
- SYSTEM_ID = system_id_res.split('/')[-1]
+ system_id_res = system_members[0][ODATA_ID]
+ _SYSTEM_ID = system_id_res.split('/')[-1]
system_id_res_resp = session_obj.invoke_request("GET", system_id_res)
system_id_res_data = system_id_res_resp.json_data.get("Storage")
if system_id_res_data:
- storage_collection_map.update({"storage_base_uri": system_id_res_data["@odata.id"]})
+ storage_collection_map.update({"storage_base_uri": system_id_res_data[ODATA_ID]})
else:
- module.fail_json(msg="Target out-of-band controller does not support storage feature using Redfish API.")
+ module.fail_json(msg=TARGET_OUT_OF_BAND)
else:
- module.fail_json(msg="Target out-of-band controller does not support storage feature using Redfish API.")
+ module.fail_json(msg=TARGET_OUT_OF_BAND)
except HTTPError as err:
if err.code in [404, 405]:
- module.fail_json(msg="Target out-of-band controller does not support storage feature using Redfish API.",
+ module.fail_json(msg=TARGET_OUT_OF_BAND,
error_info=json.load(err))
raise err
except (URLError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
raise err
-def volume_payload(module):
+def volume_payload(module, greater_version):
params = module.params
drives = params.get("drives")
capacity_bytes = params.get("capacity_bytes")
@@ -448,8 +454,8 @@ def volume_payload(module):
capacity_bytes = int(capacity_bytes)
if drives:
storage_base_uri = storage_collection_map["storage_base_uri"]
- physical_disks = [{"@odata.id": DRIVES_URI.format(storage_base_uri=storage_base_uri,
- driver_id=drive_id)} for drive_id in drives]
+ physical_disks = [{ODATA_ID: DRIVES_URI.format(storage_base_uri=storage_base_uri,
+ driver_id=drive_id)} for drive_id in drives]
raid_mapper = {
"Name": params.get("name"),
"BlockSizeBytes": params.get("block_size_bytes"),
@@ -464,10 +470,15 @@ def volume_payload(module):
raid_payload.update({"Encrypted": encrypted})
if encryption_types:
raid_payload.update({"EncryptionTypes": [encryption_types]})
- if volume_type:
+ if volume_type and greater_version:
raid_payload.update({"RAIDType": volume_type_map.get(volume_type)})
- if raid_type:
+ if raid_type and greater_version:
raid_payload.update({"RAIDType": raid_type})
+ if volume_type and greater_version is False:
+ raid_payload.update({"VolumeType": volume_type})
+ if raid_type and greater_version is False:
+ raid_map = {value: key for key, value in volume_type_map.items()}
+ raid_payload.update({"VolumeType": raid_map.get(raid_type)})
if apply_time is not None:
raid_payload.update({"@Redfish.OperationApplyTime": apply_time})
return raid_payload
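A minimal sketch of the firmware split handled above: newer iDRAC firmware takes the Redfish RAIDType property, while older firmware still expects the legacy VolumeType (only a subset of the module's volume_type_map is shown, for illustration):

volume_type_map = {"NonRedundant": "RAID0", "Mirrored": "RAID1",
                   "StripedWithParity": "RAID5"}          # subset, for illustration
raid_map = {value: key for key, value in volume_type_map.items()}
payload_new = {"RAIDType": volume_type_map["Mirrored"]}   # firmware > 3.0  -> RAID1
payload_old = {"VolumeType": raid_map["RAID1"]}           # firmware <= 3.0 -> Mirrored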
@@ -561,7 +572,7 @@ def perform_storage_volume_action(method, uri, session_obj, action, payload=None
raise err
-def check_mode_validation(module, session_obj, action, uri):
+def check_mode_validation(module, session_obj, action, uri, greater_version):
volume_id = module.params.get('volume_id')
name = module.params.get("name")
block_size_bytes = module.params.get("block_size_bytes")
@@ -575,49 +586,86 @@ def check_mode_validation(module, session_obj, action, uri):
if name is None and volume_id is None and module.check_mode:
module.exit_json(msg=CHANGES_FOUND, changed=True)
if action == "create" and name is not None:
- volume_resp = session_obj.invoke_request("GET", uri)
- volume_resp_data = volume_resp.json_data
- if volume_resp_data.get("Members@odata.count") == 0 and module.check_mode:
- module.exit_json(msg=CHANGES_FOUND, changed=True)
- elif 0 < volume_resp_data.get("Members@odata.count"):
- for mem in volume_resp_data.get("Members"):
- mem_resp = session_obj.invoke_request("GET", mem["@odata.id"])
- if mem_resp.json_data["Name"] == name:
- volume_id = mem_resp.json_data["Id"]
- break
- if name is not None and module.check_mode and volume_id is None:
- module.exit_json(msg=CHANGES_FOUND, changed=True)
+ volume_id = _create_name(module, session_obj, uri, name, volume_id)
if volume_id is not None:
- resp = session_obj.invoke_request("GET", SETTING_VOLUME_ID_URI.format(
- storage_base_uri=storage_collection_map["storage_base_uri"],
- volume_id=volume_id))
- resp_data = resp.json_data
+ _volume_id_check_mode(module, session_obj, greater_version, volume_id,
+ name, block_size_bytes, capacity_bytes, optimum_io_size_bytes,
+ encryption_types, encrypted, volume_type, raid_type, drives)
+ return None
+
+
+def _volume_id_check_mode(module, session_obj, greater_version, volume_id, name,
+ block_size_bytes, capacity_bytes, optimum_io_size_bytes,
+ encryption_types, encrypted, volume_type, raid_type, drives):
+ resp = session_obj.invoke_request("GET", SETTING_VOLUME_ID_URI.format(
+ storage_base_uri=storage_collection_map["storage_base_uri"],
+ volume_id=volume_id))
+ resp_data = resp.json_data
+ exist_value = _get_payload_for_version(greater_version, resp_data)
+ exit_value_filter = dict(
+ [(k, v) for k, v in exist_value.items() if v is not None])
+ cp_exist_value = copy.deepcopy(exit_value_filter)
+ req_value = get_request_value(greater_version, name, block_size_bytes, optimum_io_size_bytes, encryption_types, encrypted, volume_type, raid_type)
+ if capacity_bytes is not None:
+ req_value["CapacityBytes"] = int(capacity_bytes)
+ req_value_filter = dict([(k, v)
+ for k, v in req_value.items() if v is not None])
+ cp_exist_value.update(req_value_filter)
+ exist_drive, req_drive = [], []
+ if resp_data["Links"]:
+ exist_drive = [
+ disk[ODATA_ID].split("/")[-1] for disk in resp_data["Links"]["Drives"]]
+ if drives is not None:
+ req_drive = sorted(drives)
+ diff_changes = [bool(set(exit_value_filter.items()) ^ set(cp_exist_value.items())) or
+ bool(set(exist_drive) ^ set(req_drive))]
+ if module.check_mode and any(diff_changes) is True:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif (module.check_mode and any(diff_changes) is False) or \
+ (not module.check_mode and any(diff_changes) is False):
+ module.exit_json(msg=NO_CHANGES_FOUND)
+
+
+def get_request_value(greater_version, name, block_size_bytes, optimum_io_size_bytes, encryption_types, encrypted, volume_type, raid_type):
+ if greater_version:
+ req_value = {"Name": name, "BlockSizeBytes": block_size_bytes,
+ "Encrypted": encrypted, "OptimumIOSizeBytes": optimum_io_size_bytes,
+ "RAIDType": raid_type, "EncryptionTypes": encryption_types}
+ else:
+ req_value = {"Name": name, "BlockSizeBytes": block_size_bytes,
+ "Encrypted": encrypted, "OptimumIOSizeBytes": optimum_io_size_bytes,
+ "VolumeType": volume_type, "EncryptionTypes": encryption_types}
+ return req_value
+
+
+def _get_payload_for_version(greater_version, resp_data):
+ if greater_version:
exist_value = {"Name": resp_data["Name"], "BlockSizeBytes": resp_data["BlockSizeBytes"],
"CapacityBytes": resp_data["CapacityBytes"], "Encrypted": resp_data["Encrypted"],
"EncryptionTypes": resp_data["EncryptionTypes"][0],
"OptimumIOSizeBytes": resp_data["OptimumIOSizeBytes"], "RAIDType": resp_data["RAIDType"]}
- exit_value_filter = dict([(k, v) for k, v in exist_value.items() if v is not None])
- cp_exist_value = copy.deepcopy(exit_value_filter)
- req_value = {"Name": name, "BlockSizeBytes": block_size_bytes,
- "Encrypted": encrypted, "OptimumIOSizeBytes": optimum_io_size_bytes,
- "RAIDType": raid_type, "EncryptionTypes": encryption_types}
- if capacity_bytes is not None:
- req_value["CapacityBytes"] = int(capacity_bytes)
- req_value_filter = dict([(k, v) for k, v in req_value.items() if v is not None])
- cp_exist_value.update(req_value_filter)
- exist_drive, req_drive = [], []
- if resp_data["Links"]:
- exist_drive = [disk["@odata.id"].split("/")[-1] for disk in resp_data["Links"]["Drives"]]
- if drives is not None:
- req_drive = sorted(drives)
- diff_changes = [bool(set(exit_value_filter.items()) ^ set(cp_exist_value.items())) or
- bool(set(exist_drive) ^ set(req_drive))]
- if module.check_mode and any(diff_changes) is True:
- module.exit_json(msg=CHANGES_FOUND, changed=True)
- elif (module.check_mode and any(diff_changes) is False) or \
- (not module.check_mode and any(diff_changes) is False):
- module.exit_json(msg=NO_CHANGES_FOUND)
- return None
+ else:
+ exist_value = {"Name": resp_data["Name"], "BlockSizeBytes": resp_data["BlockSizeBytes"],
+ "CapacityBytes": resp_data["CapacityBytes"], "Encrypted": resp_data["Encrypted"],
+ "EncryptionTypes": resp_data["EncryptionTypes"][0],
+ "OptimumIOSizeBytes": resp_data["OptimumIOSizeBytes"], "VolumeType": resp_data["VolumeType"]}
+ return exist_value
+
+
+def _create_name(module, session_obj, uri, name, volume_id):
+ volume_resp = session_obj.invoke_request("GET", uri)
+ volume_resp_data = volume_resp.json_data
+ if volume_resp_data.get("Members@odata.count") == 0 and module.check_mode:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif 0 < volume_resp_data.get("Members@odata.count"):
+ for mem in volume_resp_data.get("Members"):
+ mem_resp = session_obj.invoke_request("GET", mem[ODATA_ID])
+ if mem_resp.json_data["Name"] == name:
+ volume_id = mem_resp.json_data["Id"]
+ break
+ if name is not None and module.check_mode and volume_id is None:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ return volume_id
def check_raid_type_supported(module, session_obj):
@@ -638,7 +686,7 @@ def check_raid_type_supported(module, session_obj):
raise err
-def get_apply_time(module, session_obj, controller_id):
+def get_apply_time(module, session_obj, controller_id, greater_version):
"""
gets the apply time from user if given otherwise fetches from server
"""
@@ -646,7 +694,10 @@ def get_apply_time(module, session_obj, controller_id):
try:
uri = APPLY_TIME_INFO_API.format(storage_base_uri=storage_collection_map["storage_base_uri"], controller_id=controller_id)
resp = session_obj.invoke_request("GET", uri)
- supported_apply_time_values = resp.json_data['@Redfish.OperationApplyTimeSupport']['SupportedValues']
+ if greater_version:
+ supported_apply_time_values = resp.json_data['@Redfish.OperationApplyTimeSupport']['SupportedValues']
+ else:
+ return apply_time
if apply_time:
if apply_time not in supported_apply_time_values:
module.exit_json(msg=APPLY_TIME_NOT_SUPPORTED_MSG.format(apply_time=apply_time, supported_apply_time_values=supported_apply_time_values),
@@ -658,24 +709,25 @@ def get_apply_time(module, session_obj, controller_id):
raise err
-def check_apply_time_supported_and_reboot_required(module, session_obj, controller_id):
+def check_apply_time_supported_and_reboot_required(module, session_obj, controller_id, greater_version):
"""
checks whether the apply time is supported and reboot operation is required or not.
"""
- apply_time = get_apply_time(module, session_obj, controller_id)
+ apply_time = get_apply_time(module, session_obj, controller_id, greater_version)
reboot_server = module.params.get("reboot_server")
if reboot_server and apply_time == "OnReset":
return True
return False
-def perform_volume_create_modify(module, session_obj):
+def perform_volume_create_modify(module, session_obj, greater_version):
"""
perform volume creation and modification for state present
"""
specified_controller_id = module.params.get("controller_id")
volume_id = module.params.get("volume_id")
- check_raid_type_supported(module, session_obj)
+ if greater_version:
+ check_raid_type_supported(module, session_obj)
action, uri, method = None, None, None
if specified_controller_id is not None:
check_controller_id_exists(module, session_obj)
@@ -690,8 +742,8 @@ def perform_volume_create_modify(module, session_obj):
volume_id=volume_id)
method = "PATCH"
action = "modify"
- payload = volume_payload(module)
- check_mode_validation(module, session_obj, action, uri)
+ payload = volume_payload(module, greater_version)
+ check_mode_validation(module, session_obj, action, uri, greater_version)
if not payload:
module.fail_json(msg="Input options are not provided for the {0} volume task.".format(action))
return perform_storage_volume_action(method, uri, session_obj, action, payload)
@@ -742,7 +794,7 @@ def perform_volume_initialization(module, session_obj):
module.fail_json(msg="'volume_id' option is a required property for initializing a volume.")
-def configure_raid_operation(module, session_obj):
+def configure_raid_operation(module, session_obj, greater_version):
"""
configure raid action based on state and command input
"""
@@ -750,7 +802,7 @@ def configure_raid_operation(module, session_obj):
state = module_params.get("state")
command = module_params.get("command")
if state is not None and state == "present":
- return perform_volume_create_modify(module, session_obj)
+ return perform_volume_create_modify(module, session_obj, greater_version)
elif state is not None and state == "absent":
return perform_volume_deletion(module, session_obj)
elif command is not None and command == "initialize":
@@ -818,11 +870,11 @@ def perform_reboot(module, session_obj):
module.exit_json(msg=msg, job_status=job_data)
-def check_job_tracking_required(module, session_obj, reboot_required, controller_id):
+def check_job_tracking_required(module, session_obj, reboot_required, controller_id, greater_version):
job_wait = module.params.get("job_wait")
apply_time = None
if controller_id:
- apply_time = get_apply_time(module, session_obj, controller_id)
+ apply_time = get_apply_time(module, session_obj, controller_id, greater_version)
if job_wait:
if apply_time == "OnReset" and not reboot_required:
return False
@@ -855,6 +907,15 @@ def validate_negative_job_time_out(module):
module.exit_json(msg=TIMEOUT_NEGATIVE_OR_ZERO_MSG, failed=True)
+def is_fw_ver_greater(session_obj):
+ firm_version = session_obj.invoke_request('GET', GET_IDRAC_FIRMWARE_VER_URI)
+ version = firm_version.json_data.get('FirmwareVersion', '')
+ if LooseVersion(version) <= '3.0':
+ return False
+ else:
+ return True
+
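How the version gate above behaves, with illustrative firmware strings:

from ansible.module_utils.compat.version import LooseVersion

LooseVersion("2.82.82.82") <= "3.0"   # True  -> legacy VolumeType path
LooseVersion("3.30.30.30") <= "3.0"   # False -> RAIDType path on newer firmware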
+
def main():
specs = {
"state": {"type": "str", "required": False, "choices": ['present', 'absent']},
@@ -899,6 +960,7 @@ def main():
validate_inputs(module)
validate_negative_job_time_out(module)
with Redfish(module.params, req_session=True) as session_obj:
+ greater_version = is_fw_ver_greater(session_obj)
fetch_storage_resource(module, session_obj)
controller_id = module.params.get("controller_id")
volume_id = module.params.get("volume_id")
@@ -907,16 +969,16 @@ def main():
if controller_id:
uri = CONTROLLER_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"], controller_id=controller_id)
resp = check_specified_identifier_exists_in_the_system(module, session_obj, uri, CONTROLLER_NOT_EXIST_ERROR.format(controller_id=controller_id))
- reboot_required = check_apply_time_supported_and_reboot_required(module, session_obj, controller_id)
- status_message = configure_raid_operation(module, session_obj)
+ reboot_required = check_apply_time_supported_and_reboot_required(module, session_obj, controller_id, greater_version)
+ status_message = configure_raid_operation(module, session_obj, greater_version)
if volume_id and reboot_server:
controller_id = volume_id.split(":")[-1]
uri = CONTROLLER_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"], controller_id=controller_id)
resp = check_specified_identifier_exists_in_the_system(module, session_obj, uri, CONTROLLER_NOT_EXIST_ERROR.format(controller_id=controller_id))
- reboot_required = check_apply_time_supported_and_reboot_required(module, session_obj, controller_id)
+ reboot_required = check_apply_time_supported_and_reboot_required(module, session_obj, controller_id, greater_version)
if reboot_required:
perform_reboot(module, session_obj)
- job_tracking_required = check_job_tracking_required(module, session_obj, reboot_required, controller_id)
+ job_tracking_required = check_job_tracking_required(module, session_obj, reboot_required, controller_id, greater_version)
job_id = status_message.get("task_id")
job_url = MANAGER_JOB_ID_URI.format(job_id)
if job_tracking_required: