Diffstat (limited to 'ansible_collections/dellemc/openmanage/plugins')
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/README.md | 18
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py | 28
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py | 8
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py | 28
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py | 28
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py | 28
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py | 28
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/inventory/ome_inventory.py | 208
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py | 15
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py | 78
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py | 20
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/module_utils/redfish.py | 15
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py | 187
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_eventing.py | 18
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py | 60
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_firmware_inventory.py | 148
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_system_inventory.py | 141
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py | 20
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py | 11
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/dellemc_system_lockdown_mode.py | 20
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py | 25
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py | 112
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_boot.py | 20
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py | 333
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py | 268
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py | 24
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_license.py | 1118
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py | 23
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py | 9
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py | 21
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py | 13
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py | 17
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_network_attributes.py | 748
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py | 15
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py | 361
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py | 9
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py | 696
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_syslog.py | 41
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_system_info.py | 9
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py | 14
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_user.py | 38
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_user_info.py | 232
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py | 20
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py | 30
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies.py | 1114
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_actions_info.py | 290
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_category_info.py | 316
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_info.py | 201
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_message_id_info.py | 141
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py | 12
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py | 10
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py | 49
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py | 8
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py | 8
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py | 8
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py | 6
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py | 8
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py | 6
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py | 10
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_chassis_slots.py | 14
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_baseline.py | 21
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py | 6
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py | 5
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_device_info.py | 27
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py | 32
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py | 6
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py | 18
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py | 6
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py | 6
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py | 113
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py | 5
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_diagnostics.py | 22
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_discovery.py | 138
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py | 100
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py | 56
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py | 118
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py | 18
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py | 8
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py | 41
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py | 8
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py | 54
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py | 302
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py | 10
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan.py | 10
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py | 6
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_powerstate.py | 8
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py | 32
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_profile_info.py | 410
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py | 4
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profiles.py | 20
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric.py | 6
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_info.py | 699
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py | 6
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink_info.py | 346
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py | 175
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py | 8
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py | 6
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py | 8
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan_info.py | 251
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py | 18
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py | 8
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py | 6
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware.py | 116
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware_rollback.py | 358
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py | 10
-rw-r--r--  ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py | 374
106 files changed, 9896 insertions, 1613 deletions
diff --git a/ansible_collections/dellemc/openmanage/plugins/README.md b/ansible_collections/dellemc/openmanage/plugins/README.md
index e5200a2da..7711a1d84 100644
--- a/ansible_collections/dellemc/openmanage/plugins/README.md
+++ b/ansible_collections/dellemc/openmanage/plugins/README.md
@@ -10,6 +10,8 @@ Here are the list of modules and module_utils supported by Dell.
├── omem_auth_options.py
├── oment_auth_options.py
└── redfish_auth_options.py
+├── inventory
+ └── ome_inventory.py
├── module_utils
├── dellemc_idrac.py
├── idrac_redfish.py
@@ -19,8 +21,6 @@ Here are the list of modules and module_utils supported by Dell.
└── modules
├── dellemc_configure_idrac_eventing.py
├── dellemc_configure_idrac_services.py
- ├── dellemc_get_firmware_inventory.py
- ├── dellemc_get_system_inventory.py
├── dellemc_idrac_lc_attributes.py
├── dellemc_idrac_storage_volume.py
├── dellemc_system_lockdown_mode.py
@@ -30,11 +30,13 @@ Here are the list of modules and module_utils supported by Dell.
├── idrac_certificates.py
├── idrac_firmware.py
├── idrac_firmware_info.py
+ ├── idrac_license.py
├── idrac_lifecycle_controller_job_status_info.py
├── idrac_lifecycle_controller_jobs.py
├── idrac_lifecycle_controller_logs.py
├── idrac_lifecycle_controller_status_info.py
├── idrac_network.py
+ ├── idrac_network_attributes.py
├── idrac_os_deployment.py
├── idrac_redfish_storage_controller.py
├── idrac_reset.py
@@ -43,8 +45,13 @@ Here are the list of modules and module_utils supported by Dell.
├── idrac_system_info.py
├── idrac_timezone_ntp.py
├── idrac_user.py
+ ├── idrac_user_info.py
├── idrac_virtual_media.py
├── ome_active_directory.py
+ ├── ome_alert_policies_message_id_info.py
+ ├── ome_alert_policies_info.py
+ ├── ome_alert_policies_actions_info.py
+ ├── ome_alert_policies_category_info.py
├── ome_application_alerts_smtp.py
├── ome_application_alerts_syslog.py
├── ome_application_certificate.py
@@ -83,18 +90,23 @@ Here are the list of modules and module_utils supported by Dell.
├── ome_network_vlan_info.py
├── ome_powerstate.py
├── ome_profile.py
+ ├── ome_profile_info.py
├── ome_server_interface_profile_info.py
├── ome_server_interface_profiles.py
+ ├── ome_smart_fabric_info.py
├── ome_smart_fabric.py
+ ├── ome_smart_fabric_uplink_info.py
├── ome_smart_fabric_uplink.py
├── ome_template.py
├── ome_template_identity_pool.py
├── ome_template_info.py
├── ome_template_network_vlan.py
+ ├── ome_template_network_vlan_info.py
├── ome_user.py
├── ome_user_info.py
├── redfish_event_subscription.py
├── redfish_firmware.py
+ ├── redfish_firmware_rollback.py
├── redfish_powerstate.py
└── redfish_storage_volume.py
-``` \ No newline at end of file
+```
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py
index 5ca16d6d7..cdad6d1b8 100644
--- a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -18,17 +18,23 @@ class ModuleDocFragment(object):
DOCUMENTATION = r'''
options:
idrac_ip:
- required: True
+ required: true
type: str
description: iDRAC IP Address.
idrac_user:
- required: True
+ required: true
type: str
- description: iDRAC username.
+ description:
+ - iDRAC username.
+ - If the username is not provided, then the environment variable C(IDRAC_USERNAME) is used.
+ - "Example: export IDRAC_USERNAME=username"
idrac_password:
- required: True
+ required: true
type: str
- description: iDRAC user password.
+ description:
+ - iDRAC user password.
+ - If the password is not provided, then the environment variable C(IDRAC_PASSWORD) is used.
+ - "Example: export IDRAC_PASSWORD=password"
aliases: ['idrac_pwd']
idrac_port:
type: int
@@ -36,11 +42,11 @@ options:
default: 443
validate_certs:
description:
- - If C(False), the SSL certificates will not be validated.
- - Configure C(False) only on personally controlled sites where self-signed certificates are used.
- - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(false) by default.
type: bool
- default: True
+ default: true
version_added: 5.0.0
ca_path:
description:
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py
index f0ebb7e3a..cc4a6289d 100644
--- a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 3.0.0
-# Copyright (C) 2020-2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -18,7 +18,7 @@ class ModuleDocFragment(object):
DOCUMENTATION = r'''
options:
share_name:
- required: True
+ required: true
type: str
description: Network share or a local path.
share_user:
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py
index b84c50d55..7627a6621 100644
--- a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -20,26 +20,32 @@ options:
hostname:
description: OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname.
type: str
- required: True
+ required: true
username:
- description: OpenManage Enterprise or OpenManage Enterprise Modular username.
+ description:
+ - OpenManage Enterprise or OpenManage Enterprise Modular username.
+ - If the username is not provided, then the environment variable C(OME_USERNAME) is used.
+ - "Example: export OME_USERNAME=username"
type: str
- required: True
+ required: true
password:
- description: OpenManage Enterprise or OpenManage Enterprise Modular password.
+ description:
+ - OpenManage Enterprise or OpenManage Enterprise Modular password.
+ - If the password is not provided, then the environment variable C(OME_PASSWORD) is used.
+ - "Example: export OME_PASSWORD=password"
type: str
- required: True
+ required: true
port:
description: OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port.
type: int
default: 443
validate_certs:
description:
- - If C(False), the SSL certificates will not be validated.
- - Configure C(False) only on personally controlled sites where self-signed certificates are used.
- - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(false) by default.
type: bool
- default: True
+ default: true
version_added: 5.0.0
ca_path:
description:
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py
index d8c616b2a..e611e02df 100644
--- a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -20,26 +20,32 @@ options:
hostname:
description: OpenManage Enterprise Modular IP address or hostname.
type: str
- required: True
+ required: true
username:
- description: OpenManage Enterprise Modular username.
+ description:
+ - OpenManage Enterprise Modular username.
+ - If the username is not provided, then the environment variable C(OME_USERNAME) is used.
+ - "Example: export OME_USERNAME=username"
type: str
- required: True
+ required: true
password:
- description: OpenManage Enterprise Modular password.
+ description:
+ - OpenManage Enterprise Modular password.
+ - If the password is not provided, then the environment variable C(OME_PASSWORD) is used.
+ - "Example: export OME_PASSWORD=password"
type: str
- required: True
+ required: true
port:
description: OpenManage Enterprise Modular HTTPS port.
type: int
default: 443
validate_certs:
description:
- - If C(False), the SSL certificates will not be validated.
- - Configure C(False) only on personally controlled sites where self-signed certificates are used.
- - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(false) by default.
type: bool
- default: True
+ default: true
version_added: 5.0.0
ca_path:
description:
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py
index 85b1553f7..676ded435 100644
--- a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -20,26 +20,32 @@ options:
hostname:
description: OpenManage Enterprise IP address or hostname.
type: str
- required: True
+ required: true
username:
- description: OpenManage Enterprise username.
+ description:
+ - OpenManage Enterprise username.
+ - If the username is not provided, then the environment variable C(OME_USERNAME) is used.
+ - "Example: export OME_USERNAME=username"
type: str
- required: True
+ required: true
password:
- description: OpenManage Enterprise password.
+ description:
+ - OpenManage Enterprise password.
+ - If the password is not provided, then the environment variable C(OME_PASSWORD) is used.
+ - "Example: export OME_PASSWORD=password"
type: str
- required: True
+ required: true
port:
description: OpenManage Enterprise HTTPS port.
type: int
default: 443
validate_certs:
description:
- - If C(False), the SSL certificates will not be validated.
- - Configure C(False) only on personally controlled sites where self-signed certificates are used.
- - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(false) by default.
type: bool
- default: True
+ default: true
version_added: 5.0.0
ca_path:
description:
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py
index 8eb1eda15..452bc8a2d 100644
--- a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -20,22 +20,28 @@ options:
baseuri:
description: "IP address of the target out-of-band controller. For example- <ipaddress>:<port>."
type: str
- required: True
+ required: true
username:
- description: Username of the target out-of-band controller.
+ description:
+ - Username of the target out-of-band controller.
+ - If the username is not provided, then the environment variable C(IDRAC_USERNAME) is used.
+ - "Example: export IDRAC_USERNAME=username"
type: str
- required: True
+ required: true
password:
- description: Password of the target out-of-band controller.
+ description:
+ - Password of the target out-of-band controller.
+ - If the password is not provided, then the environment variable C(IDRAC_PASSWORD) is used.
+ - "Example: export IDRAC_PASSWORD=password"
type: str
- required: True
+ required: true
validate_certs:
description:
- - If C(False), the SSL certificates will not be validated.
- - Configure C(False) only on personally controlled sites where self-signed certificates are used.
- - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(false) by default.
type: bool
- default: True
+ default: true
version_added: 5.0.0
ca_path:
description:
diff --git a/ansible_collections/dellemc/openmanage/plugins/inventory/ome_inventory.py b/ansible_collections/dellemc/openmanage/plugins/inventory/ome_inventory.py
new file mode 100644
index 000000000..93171a429
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/inventory/ome_inventory.py
@@ -0,0 +1,208 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 7.4.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+name: ome_inventory
+short_description: Group inventory plugin on OpenManage Enterprise.
+description: This plugin allows to retrieve inventory hosts from groups on OpenManage Enterprise.
+version_added: "7.1.0"
+options:
+ hostname:
+ description:
+ - OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname.
+ - If the value is not specified in the task, the value of environment variable C(OME_HOSTNAME) will be used instead.
+ env:
+ - name: OME_HOSTNAME
+ type: str
+ required: true
+ username:
+ description:
+ - OpenManage Enterprise or OpenManage Enterprise Modular username.
+ - If the value is not specified in the task, the value of environment variable C(OME_USERNAME) will be used instead.
+ env:
+ - name: OME_USERNAME
+ type: str
+ required: true
+ password:
+ description:
+ - OpenManage Enterprise or OpenManage Enterprise Modular password.
+ - If the value is not specified in the task, the value of environment variable C(OME_PASSWORD) will be used instead.
+ env:
+ - name: OME_PASSWORD
+ type: str
+ required: true
+ port:
+ description:
+ - OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port.
+ - If the value is not specified in the task, the value of environment variable C(OME_PORT) will be used instead.
+ type: int
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(false) by default.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ timeout:
+ description: The socket level timeout in seconds.
+ type: int
+ default: 30
+ ome_group_name:
+ description: Group name.
+ type: str
+ required: false
+ host_vars:
+ description: To include host related variables in the inventory source.
+ type: dict
+ required: false
+ group_vars:
+ description: To include group variables in the inventory source.
+ type: dict
+ required: false
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Felix Stephen (@felixs88)"
+notes:
+ - Run this plugin on a system that has direct access to Dell OpenManage Enterprise.
+"""
+
+from ansible.plugins.inventory import BaseInventoryPlugin
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import get_all_data_with_pagination
+
+GROUP_API = "GroupService/Groups"
+
+
+class InventoryModule(BaseInventoryPlugin):
+
+ NAME = "dellemc.openmanage.ome_inventory"
+
+ def __init__(self):
+ super(InventoryModule, self).__init__()
+ self.config = None
+
+ def _get_connection_resp(self):
+ port = self.get_option("port") if "port" in self.config else 443
+ validate_certs = self.get_option("validate_certs") if "validate_certs" in self.config else False
+ module_params = {"hostname": self.get_option("hostname"), "username": self.get_option("username"),
+ "password": self.get_option("password"), "port": port, "validate_certs": validate_certs}
+ if "ca_path" in self.config:
+ module_params.update({"ca_path": self.get_option("ca_path")})
+ with RestOME(module_params, req_session=False) as ome:
+ all_group_data = get_all_data_with_pagination(ome, GROUP_API)
+ return all_group_data
+
+ def _set_host_vars(self, host):
+ self.inventory.set_variable(host, "idrac_ip", host)
+ self.inventory.set_variable(host, "baseuri", host)
+ self.inventory.set_variable(host, "hostname", host)
+ if "host_vars" in self.config:
+ host_vars = self.get_option("host_vars")
+ for key, val in dict(host_vars).items():
+ self.inventory.set_variable(host, key, val)
+
+ def _set_group_vars(self, group):
+ self.inventory.add_group(group)
+ if "group_vars" in self.config:
+ group_vars = self.get_option("group_vars")
+ if group in dict(group_vars):
+ for key, val in dict(dict(group_vars)[group]).items():
+ self.inventory.set_variable(group, key, val)
+
+ def _get_device_host(self, mgmt):
+ if len(mgmt["DeviceManagement"]) == 1 and mgmt["DeviceManagement"][0]["NetworkAddress"].startswith("["):
+ dev_host = mgmt["DeviceManagement"][0]["NetworkAddress"][1:-1]
+ elif len(mgmt["DeviceManagement"]) == 2 and mgmt["DeviceManagement"][0]["NetworkAddress"].startswith("["):
+ dev_host = mgmt["DeviceManagement"][1]["NetworkAddress"]
+ else:
+ dev_host = mgmt["DeviceManagement"][0]["NetworkAddress"]
+ return dev_host
+
+ def _get_all_devices(self, device_uri):
+ device_host = []
+ device_host_uri = device_uri.strip("/api/")
+ port = self.get_option("port") if "port" in self.config else 443
+ validate_certs = self.get_option("validate_certs") if "validate_certs" in self.config else False
+ module_params = {
+ "hostname": self.get_option("hostname"),
+ "username": self.get_option("username"),
+ "password": self.get_option("password"),
+ "port": port,
+ "validate_certs": validate_certs}
+ if "ca_path" in self.config:
+ module_params.update({"ca_path": self.get_option("ca_path")})
+ with RestOME(module_params, req_session=False) as ome:
+ device_resp = get_all_data_with_pagination(ome, device_host_uri)
+ device_data = device_resp.get("report_list", [])
+ if device_data is not None:
+ for mgmt in device_data:
+ if (len(mgmt["DeviceManagement"]) != 0):
+ device_host.append(self._get_device_host(mgmt))
+ return device_host
+
+ def _set_child_group(self, group_data):
+ port = self.get_option("port") if "port" in self.config else 443
+ validate_certs = self.get_option("validate_certs") if "validate_certs" in self.config else False
+ module_params = {"hostname": self.get_option("hostname"), "username": self.get_option("username"),
+ "password": self.get_option("password"), "port": port, "validate_certs": validate_certs}
+ if "ca_path" in self.config:
+ module_params.update({"ca_path": self.get_option("ca_path")})
+ with RestOME(module_params, req_session=False) as ome:
+ for gdata in group_data:
+ group_name = gdata["Name"]
+ subgroup_uri = gdata["SubGroups@odata.navigationLink"].strip("/api/")
+ sub_group = get_all_data_with_pagination(ome, subgroup_uri)
+ gdata = sub_group.get("report_list", [])
+ if gdata:
+ self._add_group_data(gdata)
+ self._add_child_group_data(group_name, gdata)
+
+ def _add_child_group_data(self, group_name, gdata):
+ for child_name in gdata:
+ self.inventory.add_child(group_name, child_name["Name"])
+
+ def _add_group_data(self, group_data):
+ visible_gdata = list(filter(lambda d: d.get("Visible") in [False], group_data))
+ if visible_gdata:
+ for gp in visible_gdata:
+ group_data.remove(gp)
+ for gdata in group_data:
+ self._set_group_vars(gdata["Name"])
+ device_ip = self._get_all_devices(gdata["AllLeafDevices@odata.navigationLink"])
+ for hst in device_ip:
+ self.inventory.add_host(host=hst, group=gdata["Name"])
+ self._set_host_vars(hst)
+ self._set_child_group(group_data)
+
+ def _populate(self, all_group_data):
+ group_data = all_group_data.get("report_list", [])
+ group_name = str(self.get_option("ome_group_name")) if "ome_group_name" in self.config else None
+ if group_name is not None:
+ group_data = list(filter(lambda d: d.get("Name").lower() in [group_name.lower()], group_data))
+ elif group_name is None:
+ group_data = list(filter(lambda d: d.get("Name") in ["All Devices"], group_data))
+ self._add_group_data(group_data)
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self.config = self._read_config_data(path)
+ all_group_data = self._get_connection_resp()
+ self._populate(all_group_data)
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py
index fee5339c5..b2b2240d0 100644
--- a/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# Redistribution and use in source and binary forms, with or without modification,
@@ -29,10 +29,10 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
+from ansible.module_utils.common.parameters import env_fallback
try:
from omsdk.sdkinfra import sdkinfra
from omsdk.sdkcreds import UserCredentials
- from omsdk.sdkfile import FileOnShare, file_share_manager
from omsdk.sdkprotopref import ProtoPreference, ProtocolEnum
from omsdk.http.sdkwsmanbase import WsManOptions
HAS_OMSDK = True
@@ -42,8 +42,8 @@ except ImportError:
idrac_auth_params = {
"idrac_ip": {"required": True, "type": 'str'},
- "idrac_user": {"required": True, "type": 'str'},
- "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True},
+ "idrac_user": {"required": True, "type": 'str', "fallback": (env_fallback, ['IDRAC_USERNAME'])},
+ "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True, "fallback": (env_fallback, ['IDRAC_PASSWORD'])},
"idrac_port": {"required": False, "default": 443, "type": 'int'},
"validate_certs": {"type": "bool", "default": True},
"ca_path": {"type": "path"},
@@ -55,7 +55,7 @@ class iDRACConnection:
def __init__(self, module_params):
if not HAS_OMSDK:
- raise ImportError("Dell EMC OMSDK library is required for this module")
+ raise ImportError("Dell OMSDK library is required for this module")
self.idrac_ip = module_params['idrac_ip']
self.idrac_user = module_params['idrac_user']
self.idrac_pwd = module_params['idrac_password']
@@ -72,7 +72,7 @@ class iDRACConnection:
self.ca_path = self._get_omam_ca_env()
verify_ssl = self.ca_path
timeout = module_params.get("timeout", 30)
- if not timeout or type(timeout) != int:
+ if not timeout or not isinstance(timeout, int):
timeout = 30
self.pOp = WsManOptions(port=self.idrac_port, read_timeout=timeout, verify_ssl=verify_ssl)
self.sdk = sdkinfra()
@@ -81,6 +81,7 @@ class iDRACConnection:
raise RuntimeError(msg)
def __enter__(self):
+ self.idrac_ip = self.idrac_ip.strip('[]')
self.sdk.importPath()
protopref = ProtoPreference(ProtocolEnum.WSMAN)
protopref.include_only(ProtocolEnum.WSMAN)
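Editor's note: the env_fallback entries added to idrac_auth_params above mean that, when idrac_user or idrac_password is omitted, AnsibleModule resolves the value from the IDRAC_USERNAME or IDRAC_PASSWORD environment variable. A minimal, illustrative sketch of that pattern (not part of the collection) follows; the module body is hypothetical.

# Illustrative only: a stripped-down argument spec using the same fallback
# pattern as idrac_auth_params above.
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.parameters import env_fallback

def main():
    module = AnsibleModule(
        argument_spec={
            "idrac_ip": {"required": True, "type": "str"},
            "idrac_user": {"required": True, "type": "str",
                           "fallback": (env_fallback, ["IDRAC_USERNAME"])},
            "idrac_password": {"required": True, "type": "str", "no_log": True,
                               "fallback": (env_fallback, ["IDRAC_PASSWORD"])},
        },
        supports_check_mode=True,
    )
    # idrac_user/idrac_password are populated from the environment when the
    # playbook does not pass them explicitly.
    module.exit_json(changed=False, user=module.params["idrac_user"])

if __name__ == "__main__":
    main()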
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py
index 168c8277d..cf4581e89 100644
--- a/ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
-# Dell EMC OpenManage Ansible Modules
-# Version 5.5.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.0.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
@@ -36,11 +36,13 @@ import os
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.common.parameters import env_fallback
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import config_ipv6
idrac_auth_params = {
"idrac_ip": {"required": True, "type": 'str'},
- "idrac_user": {"required": True, "type": 'str'},
- "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True},
+ "idrac_user": {"required": True, "type": 'str', "fallback": (env_fallback, ['IDRAC_USERNAME'])},
+ "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True, "fallback": (env_fallback, ['IDRAC_PASSWORD'])},
"idrac_port": {"required": False, "default": 443, "type": 'int'},
"validate_certs": {"type": "bool", "default": True},
"ca_path": {"type": "path"},
@@ -108,6 +110,7 @@ class iDRACRedfishAPI(object):
self.session_id = None
self.protocol = 'https'
self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+ self.ipaddress = config_ipv6(self.ipaddress)
def _get_url(self, uri):
return "{0}://{1}:{2}{3}".format(self.protocol, self.ipaddress, self.port, uri)
@@ -203,7 +206,7 @@ class iDRACRedfishAPI(object):
This method fetches the connected server generation.
:return: 14, 4.11.11.11
"""
- model, firmware_version = None, None
+ firmware_version = None
response = self.invoke_request(MANAGER_URI, 'GET')
if response.status_code == 200:
generation = int(re.search(r"\d+(?=G)", response.json_data["Model"]).group())
@@ -250,7 +253,7 @@ class iDRACRedfishAPI(object):
return response
def export_scp(self, export_format=None, export_use=None, target=None,
- job_wait=False, share=None):
+ job_wait=False, share=None, include_in_export="Default"):
"""
This method exports system configuration details from the system.
:param export_format: XML or JSON.
@@ -275,6 +278,21 @@ class iDRACRedfishAPI(object):
payload["ShareParameters"]["Username"] = share["username"]
if share.get("password") is not None:
payload["ShareParameters"]["Password"] = share["password"]
+ if share.get("ignore_certificate_warning") is not None:
+ payload["ShareParameters"]["IgnoreCertificateWarning"] = share["ignore_certificate_warning"]
+ if share.get("proxy_support") is not None:
+ payload["ShareParameters"]["ProxySupport"] = share["proxy_support"]
+ if share.get("proxy_type") is not None:
+ payload["ShareParameters"]["ProxyType"] = share["proxy_type"]
+ if share.get("proxy_port") is not None:
+ payload["ShareParameters"]["ProxyPort"] = share["proxy_port"]
+ if share.get("proxy_server") is not None:
+ payload["ShareParameters"]["ProxyServer"] = share["proxy_server"]
+ if share.get("proxy_username") is not None:
+ payload["ShareParameters"]["ProxyUserName"] = share["proxy_username"]
+ if share.get("proxy_password") is not None:
+ payload["ShareParameters"]["ProxyPassword"] = share["proxy_password"]
+ payload["IncludeInExport"] = include_in_export
response = self.invoke_request(EXPORT_URI, "POST", data=payload)
if response.status_code == 202 and job_wait:
task_uri = response.headers["Location"]
@@ -311,10 +329,21 @@ class iDRACRedfishAPI(object):
payload["ShareParameters"]["Username"] = share["username"]
if share.get("password") is not None:
payload["ShareParameters"]["Password"] = share["password"]
+ if share.get("ignore_certificate_warning") is not None:
+ payload["ShareParameters"]["IgnoreCertificateWarning"] = share["ignore_certificate_warning"]
+ if share.get("proxy_support") is not None:
+ payload["ShareParameters"]["ProxySupport"] = share["proxy_support"]
+ if share.get("proxy_type") is not None:
+ payload["ShareParameters"]["ProxyType"] = share["proxy_type"]
+ if share.get("proxy_port") is not None:
+ payload["ShareParameters"]["ProxyPort"] = share["proxy_port"]
+ if share.get("proxy_server") is not None:
+ payload["ShareParameters"]["ProxyServer"] = share["proxy_server"]
+ if share.get("proxy_username") is not None:
+ payload["ShareParameters"]["ProxyUserName"] = share["proxy_username"]
+ if share.get("proxy_password") is not None:
+ payload["ShareParameters"]["ProxyPassword"] = share["proxy_password"]
response = self.invoke_request(IMPORT_URI, "POST", data=payload)
- if response.status_code == 202 and job_wait:
- task_uri = response.headers["Location"]
- response = self.wait_for_job_complete(task_uri, job_wait=job_wait)
return response
def import_preview(self, import_buffer=None, target=None, share=None, job_wait=False):
@@ -335,6 +364,20 @@ class iDRACRedfishAPI(object):
payload["ShareParameters"]["Username"] = share["username"]
if share.get("password") is not None:
payload["ShareParameters"]["Password"] = share["password"]
+ if share.get("ignore_certificate_warning") is not None:
+ payload["ShareParameters"]["IgnoreCertificateWarning"] = share["ignore_certificate_warning"]
+ if share.get("proxy_support") is not None:
+ payload["ShareParameters"]["ProxySupport"] = share["proxy_support"]
+ if share.get("proxy_type") is not None:
+ payload["ShareParameters"]["ProxyType"] = share["proxy_type"]
+ if share.get("proxy_port") is not None:
+ payload["ShareParameters"]["ProxyPort"] = share["proxy_port"]
+ if share.get("proxy_server") is not None:
+ payload["ShareParameters"]["ProxyServer"] = share["proxy_server"]
+ if share.get("proxy_username") is not None:
+ payload["ShareParameters"]["ProxyUserName"] = share["proxy_username"]
+ if share.get("proxy_password") is not None:
+ payload["ShareParameters"]["ProxyPassword"] = share["proxy_password"]
response = self.invoke_request(IMPORT_PREVIEW, "POST", data=payload)
if response.status_code == 202 and job_wait:
task_uri = response.headers["Location"]
@@ -356,6 +399,21 @@ class iDRACRedfishAPI(object):
response = self.wait_for_job_complete(task_uri, job_wait=job_wait)
return response
+ def import_preview_scp(self, import_buffer=None, target=None, job_wait=False):
+ """
+ This method imports preview system configuration details to the system.
+ :param import_buffer: import buffer payload content xml or json format
+ :param target: IDRAC or NIC or ALL or BIOS or RAID.
+ :param job_wait: True or False decide whether to wait till the job completion.
+ :return: json response
+ """
+ payload = {"ImportBuffer": import_buffer, "ShareParameters": {"Target": target}}
+ response = self.invoke_request(IMPORT_PREVIEW, "POST", data=payload)
+ if response.status_code == 202 and job_wait:
+ task_uri = response.headers["Location"]
+ response = self.wait_for_job_complete(task_uri, job_wait=job_wait)
+ return response
+
def get_idrac_local_account_attr(self, idrac_attribues, fqdd=None):
"""
This method filtered from all the user attributes from the given idrac attributes.
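Editor's note: a hedged usage sketch of the extended export_scp() follows. The proxy_* and ignore_certificate_warning keys match the ShareParameters handling added above; the connection handling (iDRACRedfishAPI taking the idrac_auth_params dictionary with req_session) and the share_ip/share_name/share_type keys are assumptions based on how other modules in this collection call this API, not taken from this diff.

# Sketch only: parameter values and the share_ip/share_name/share_type keys
# are illustrative assumptions; the proxy_* and ignore_certificate_warning
# keys correspond to the new ShareParameters handled above.
from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI

idrac_params = {"idrac_ip": "192.168.0.1", "idrac_user": "user_name",
                "idrac_password": "user_password", "idrac_port": 443,
                "validate_certs": False, "ca_path": None, "timeout": 30}
share = {"share_ip": "192.168.0.2", "share_name": "scp_share",      # assumed keys
         "share_type": "HTTPS", "username": "share_user", "password": "share_pwd",
         "ignore_certificate_warning": "Enabled", "proxy_support": "Enabled",
         "proxy_type": "HTTP", "proxy_server": "192.168.0.3", "proxy_port": "3128",
         "proxy_username": "proxy_user", "proxy_password": "proxy_pwd"}

with iDRACRedfishAPI(idrac_params, req_session=True) as idrac:
    response = idrac.export_scp(export_format="JSON", export_use="Default",
                                target="ALL", job_wait=True, share=share,
                                include_in_export="Default")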
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py
index cdb5ddf2c..cd0bb6be0 100644
--- a/ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
@@ -34,13 +34,15 @@ import json
import os
import time
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.common.parameters import env_fallback
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import config_ipv6
ome_auth_params = {
"hostname": {"required": True, "type": "str"},
- "username": {"required": True, "type": "str"},
- "password": {"required": True, "type": "str", "no_log": True},
+ "username": {"required": True, "type": "str", "fallback": (env_fallback, ['OME_USERNAME'])},
+ "password": {"required": True, "type": "str", "no_log": True, "fallback": (env_fallback, ['OME_PASSWORD'])},
"port": {"type": "int", "default": 443},
"validate_certs": {"type": "bool", "default": True},
"ca_path": {"type": "path"},
@@ -54,6 +56,7 @@ SESSION_RESOURCE_COLLECTION = {
JOB_URI = "JobService/Jobs({job_id})"
JOB_SERVICE_URI = "JobService/Jobs"
+HOST_UNRESOLVED_MSG = "Unable to resolve hostname or IP {0}."
class OpenURLResponse(object):
@@ -90,7 +93,7 @@ class RestOME(object):
def __init__(self, module_params=None, req_session=False):
self.module_params = module_params
- self.hostname = self.module_params["hostname"]
+ self.hostname = str(self.module_params["hostname"]).strip('][')
self.username = self.module_params["username"]
self.password = self.module_params["password"]
self.port = self.module_params["port"]
@@ -101,6 +104,7 @@ class RestOME(object):
self.session_id = None
self.protocol = 'https'
self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+ self.hostname = config_ipv6(self.hostname)
def _get_base_url(self):
"""builds base url"""
@@ -261,7 +265,7 @@ class RestOME(object):
device_id = device_info["Id"]
return {"Id": device_id, "value": device_info}
- def get_all_items_with_pagination(self, uri):
+ def get_all_items_with_pagination(self, uri, query_param=None):
"""
This implementation mainly to get all available items from ome for pagination
supported GET uri
@@ -269,7 +273,7 @@ class RestOME(object):
:return: dict.
"""
try:
- resp = self.invoke_request('GET', uri)
+ resp = self.invoke_request('GET', uri, query_param=query_param)
data = resp.json_data
total_items = data.get("value", [])
total_count = data.get('@odata.count', 0)
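Editor's note: get_all_items_with_pagination() now forwards an optional query_param to invoke_request(). The sketch below builds the connection parameters the same way the ome_inventory plugin above does; the device URI and the $filter value are illustrative assumptions, not taken from this changeset.

# Sketch only: the URI and $filter value are examples; connection parameters
# mirror the ome_inventory plugin added earlier in this changeset.
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME

module_params = {"hostname": "192.168.0.1", "username": "admin",
                 "password": "password", "port": 443, "validate_certs": False}
with RestOME(module_params, req_session=False) as ome:
    # query_param is now forwarded to the initial GET request.
    all_devices = ome.get_all_items_with_pagination(
        "DeviceService/Devices", query_param={"$filter": "Type eq 1000"})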
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/redfish.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/redfish.py
index 59c467057..8a26eaf60 100644
--- a/ansible_collections/dellemc/openmanage/plugins/module_utils/redfish.py
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/redfish.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
@@ -34,11 +34,13 @@ import os
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.common.parameters import env_fallback
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import config_ipv6
redfish_auth_params = {
"baseuri": {"required": True, "type": "str"},
- "username": {"required": True, "type": "str"},
- "password": {"required": True, "type": "str", "no_log": True},
+ "username": {"required": True, "type": "str", "fallback": (env_fallback, ['IDRAC_USERNAME'])},
+ "password": {"required": True, "type": "str", "no_log": True, "fallback": (env_fallback, ['IDRAC_PASSWORD'])},
"validate_certs": {"type": "bool", "default": True},
"ca_path": {"type": "path"},
"timeout": {"type": "int", "default": 30},
@@ -49,6 +51,8 @@ SESSION_RESOURCE_COLLECTION = {
"SESSION_ID": "/redfish/v1/Sessions/{Id}",
}
+HOST_UNRESOLVED_MSG = "Unable to resolve hostname or IP {0}."
+
class OpenURLResponse(object):
"""Handles HTTPResponse"""
@@ -101,6 +105,7 @@ class Redfish(object):
self.protocol = 'https'
self.root_uri = '/redfish/v1/'
self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+ self.hostname = config_ipv6(self.hostname)
def _get_base_url(self):
"""builds base url"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py
index d0da26e57..3d8abfbe5 100644
--- a/ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
# Dell OpenManage Ansible Modules
-# Version 6.1.0
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 8.2.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
@@ -34,17 +34,25 @@ NO_CHANGES_MSG = "No changes found to be applied."
RESET_UNTRACK = "iDRAC reset is in progress. Until the iDRAC is reset, the changes would not apply."
RESET_SUCCESS = "iDRAC has been reset successfully."
RESET_FAIL = "Unable to reset the iDRAC. For changes to reflect, manually reset the iDRAC."
+INVALID_ID_MSG = "Unable to complete the operation because " + \
+ "the value `{0}` for the input `{1}` parameter is invalid."
SYSTEM_ID = "System.Embedded.1"
MANAGER_ID = "iDRAC.Embedded.1"
SYSTEMS_URI = "/redfish/v1/Systems"
MANAGERS_URI = "/redfish/v1/Managers"
+CHASSIS_URI = "/redfish/v1/Chassis"
IDRAC_RESET_URI = "/redfish/v1/Managers/{res_id}/Actions/Manager.Reset"
SYSTEM_RESET_URI = "/redfish/v1/Systems/{res_id}/Actions/ComputerSystem.Reset"
MANAGER_JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs?$expand=*($levels=1)"
MANAGER_JOB_ID_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{0}"
-
+GET_IDRAC_FIRMWARE_VER_URI = "/redfish/v1/Managers/iDRAC.Embedded.1?$select=FirmwareVersion"
+HOSTNAME_REGEX = r"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$"
import time
+from datetime import datetime
+import re
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
@@ -69,6 +77,21 @@ def strip_substr_dict(odata_dict, chkstr='@odata.', case_sensitive=False):
return odata_dict
+def config_ipv6(hostname):
+ ip_addr, port = hostname, None
+ if hostname.count(':') == 1:
+ ip_addr, port = hostname.split(':')
+ if not re.match(HOSTNAME_REGEX, ip_addr):
+ if ']:' in ip_addr:
+ ip_addr, port = ip_addr.split(']:')
+ ip_addr = ip_addr.strip('[]')
+ if port is None or port == "":
+ hostname = "[{0}]".format(ip_addr)
+ else:
+ hostname = "[{0}]:{1}".format(ip_addr, port)
+ return hostname
+
+
def job_tracking(rest_obj, job_uri, max_job_wait_sec=600, job_state_var=('LastRunStatus', 'Id'),
job_complete_states=(2060, 2020, 2090), job_fail_states=(2070, 2101, 2102, 2103),
job_running_states=(2050, 2040, 2030, 2100),
@@ -265,8 +288,8 @@ def reset_idrac(idrac_restobj, wait_time_sec=300, res_id=MANAGER_ID, interval=30
track_failed = True
reset_msg = "iDRAC reset triggered successfully."
try:
- resp = idrac_restobj.invoke_request(IDRAC_RESET_URI.format(res_id=res_id), 'POST',
- data={"ResetType": "GracefulRestart"})
+ idrac_restobj.invoke_request(IDRAC_RESET_URI.format(res_id=res_id), 'POST',
+ data={"ResetType": "GracefulRestart"})
if wait_time_sec:
track_failed, reset_msg = wait_after_idrac_reset(idrac_restobj, wait_time_sec, interval)
reset = True
@@ -348,3 +371,157 @@ def get_system_res_id(idrac):
res_uri = member[0].get('@odata.id')
res_id = res_uri.split("/")[-1]
return res_id, error_msg
+
+
+def get_all_data_with_pagination(ome_obj, uri, query_param=None):
+ """To get all the devices with pagination based on the filter provided."""
+ query, resp, report_list = "", None, []
+ try:
+ resp = ome_obj.invoke_request('GET', uri, query_param=query_param)
+ next_uri = resp.json_data.get("@odata.nextLink", None)
+ report_list = resp.json_data.get("value")
+ if query_param is not None:
+ for k, v in query_param.items():
+ query += "{0}={1}".format(k, v.replace(" ", "%20"))
+ while next_uri is not None:
+ next_uri_query = "{0}&{1}".format(next_uri.strip("/api"), query) if query else next_uri.strip("/api")
+ resp = ome_obj.invoke_request('GET', next_uri_query)
+ report_list.extend(resp.json_data.get("value"))
+ next_uri = resp.json_data.get("@odata.nextLink", None)
+ except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ raise err
+ return {"resp_obj": resp, "report_list": report_list}
+
+
+def remove_key(data, regex_pattern='@odata.'):
+ '''
+ :param data: the dict/list to be stripped of unwanted keys
+ :param remove_char: the substring to be checked among the keys
+ :return: dict/list
+ '''
+ try:
+ if isinstance(data, dict):
+ for key in list(data.keys()):
+ if re.match(regex_pattern, key):
+ data.pop(key, None)
+ else:
+ remove_key(data[key], regex_pattern)
+ elif isinstance(data, list):
+ for item in data:
+ remove_key(item, regex_pattern)
+ except Exception:
+ pass
+ return data
+
+
+def wait_for_redfish_reboot_job(redfish_obj, res_id, payload=None, wait_time_sec=300):
+ reset, job_resp, msg = False, {}, ""
+ try:
+ resp = redfish_obj.invoke_request('POST', SYSTEM_RESET_URI.format(res_id=res_id), data=payload, api_timeout=120)
+ time.sleep(10)
+ if wait_time_sec and resp.status_code == 204:
+ resp = redfish_obj.invoke_request("GET", MANAGER_JOB_URI)
+ reboot_job_lst = list(filter(lambda d: (d["JobType"] in ["RebootNoForce"]), resp.json_data["Members"]))
+ job_resp = max(reboot_job_lst, key=lambda d: datetime.strptime(d["StartTime"], "%Y-%m-%dT%H:%M:%S"))
+ if job_resp:
+ reset = True
+ else:
+ msg = RESET_FAIL
+ except Exception:
+ reset = False
+ return job_resp, reset, msg
+
+
+def wait_for_redfish_job_complete(redfish_obj, job_uri, job_wait=True, wait_timeout=120, sleep_time=10):
+ max_sleep_time = wait_timeout
+ sleep_interval = sleep_time
+ job_msg = "The job is not complete after {0} seconds.".format(wait_timeout)
+ job_resp = {}
+ if job_wait:
+ while max_sleep_time:
+ if max_sleep_time > sleep_interval:
+ max_sleep_time = max_sleep_time - sleep_interval
+ else:
+ sleep_interval = max_sleep_time
+ max_sleep_time = 0
+ time.sleep(sleep_interval)
+ job_resp = redfish_obj.invoke_request("GET", job_uri, api_timeout=120)
+ if job_resp.json_data.get("PercentComplete") == 100:
+ time.sleep(10)
+ return job_resp, ""
+ if job_resp.json_data.get("JobState") == "RebootFailed":
+ time.sleep(10)
+ return job_resp, job_msg
+ else:
+ time.sleep(10)
+ job_resp = redfish_obj.invoke_request("GET", job_uri, api_timeout=120)
+ return job_resp, ""
+ return job_resp, job_msg
+
+
+def get_dynamic_uri(idrac_obj, base_uri, search_label=''):
+ resp = idrac_obj.invoke_request(method='GET', uri=base_uri).json_data
+ if search_label:
+ if search_label in resp:
+ return resp[search_label]
+ return None
+ return resp
+
+
+def get_scheduled_job_resp(idrac_obj, job_type):
+ # job_type can be like 'NICConfiguration' or 'BIOSConfiguration'
+ job_resp = {}
+ job_list = idrac_obj.invoke_request(
+ MANAGER_JOB_URI, "GET").json_data.get('Members', [])
+ for each_job in job_list:
+ if each_job.get("JobType") == job_type and each_job.get("JobState") in ["Scheduled", "Running", "Starting"]:
+ job_resp = each_job
+ break
+ return job_resp
+
+
+def delete_job(idrac_obj, job_id):
+ resp = idrac_obj.invoke_request(uri=MANAGER_JOB_ID_URI.format(job_id), method="DELETE")
+ return resp.json_data
+
+
+def get_current_time(redfish_obj):
+ res_id = get_manager_res_id(redfish_obj)
+ resp = redfish_obj.invoke_request(MANAGERS_URI + '/' + res_id, "GET")
+ curr_time = resp.json_data.get("DateTime")
+ date_offset = resp.json_data.get("DateTimeLocalOffset")
+ return curr_time, date_offset
+
+
+def xml_data_conversion(attr_dict, fqdd=None):
+ component = """<Component FQDD="{0}">{1}</Component>"""
+ attr = ""
+ for k, v in attr_dict.items():
+ key = re.sub(r"\.(?!\d)", "#", k)
+ attr += '<Attribute Name="{0}">{1}</Attribute>'.format(key, v)
+ root = component.format(fqdd, attr)
+ return root
+
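For reference, a sketch of the SCP component XML this helper produces for one attribute (the attribute name and FQDD are examples only):

xml = xml_data_conversion({"SNMP.1.AgentCommunity": "public"}, fqdd="iDRAC.Embedded.1")
# xml == '<Component FQDD="iDRAC.Embedded.1"><Attribute Name="SNMP.1#AgentCommunity">public</Attribute></Component>'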
+
+def validate_and_get_first_resource_id_uri(module, idrac, base_uri):
+ odata = '@odata.id'
+ found = False
+ res_id_uri = None
+ res_id_input = module.params.get('resource_id')
+ res_id_members = get_dynamic_uri(idrac, base_uri, 'Members')
+ for each in res_id_members:
+ if res_id_input and res_id_input == each[odata].split('/')[-1]:
+ res_id_uri = each[odata]
+ found = True
+ break
+ if not found and res_id_input:
+ return res_id_uri, INVALID_ID_MSG.format(
+ res_id_input, 'resource_id')
+ elif res_id_input is None:
+ res_id_uri = res_id_members[0][odata]
+ return res_id_uri, ''
+
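A hedged usage sketch; SYSTEMS_URI is assumed to be the Redfish Systems collection URI defined elsewhere in this module_utils package:

uri, error_msg = validate_and_get_first_resource_id_uri(module, idrac, SYSTEMS_URI)
if error_msg:
    module.fail_json(msg=error_msg)
system_resp = get_dynamic_uri(idrac, uri)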
+
+def get_idrac_firmware_version(idrac):
+ firm_version = idrac.invoke_request(method='GET', uri=GET_IDRAC_FIRMWARE_VER_URI)
+ return firm_version.json_data.get('FirmwareVersion', '')
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_eventing.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_eventing.py
index 945fd90e9..4a88a38e5 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_eventing.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_eventing.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -88,7 +88,7 @@ options:
choices: [Enabled, Disabled]
smtp_ip_address:
type: str
- description: SMTP IP address for communication.
+ description: Enter the IPv4 or IPv6 address of the SMTP server or the FQDN or DNS name.
smtp_port:
type: str
description: SMTP Port number for access.
@@ -99,12 +99,13 @@ options:
type: str
description: Password for SMTP authentication.
requirements:
- - "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "omsdk >= 1.2.503"
+ - "python >= 3.9.6"
author: "Felix Stephen (@felixs88)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+    - This module supports both IPv4 and IPv6 addresses for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -112,9 +113,9 @@ EXAMPLES = """
---
- name: Configure the iDRAC eventing attributes
dellemc.openmanage.dellemc_configure_idrac_eventing:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
destination_number: "2"
destination: "1.1.1.1"
@@ -194,7 +195,6 @@ try:
AlertEnable_IPMILanTypes,
SMTPAuthentication_RemoteHostsTypes)
from omsdk.sdkfile import file_share_manager
- from omsdk.sdkcreds import UserCredentials
except ImportError:
pass
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py
index 5a0eacf1b..e69563eee 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -28,15 +28,15 @@ description:
- This module allows to configure the iDRAC services related attributes.
options:
idrac_ip:
- required: True
+ required: true
type: str
description: iDRAC IP Address.
idrac_user:
- required: True
+ required: true
type: str
description: iDRAC username.
idrac_password:
- required: True
+ required: true
type: str
description: iDRAC user password.
aliases: ['idrac_pwd']
@@ -46,11 +46,11 @@ options:
default: 443
validate_certs:
description:
- - If C(False), the SSL certificates will not be validated.
- - Configure C(False) only on personally controlled sites where self-signed certificates are used.
- - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(false) by default.
type: bool
- default: True
+ default: true
version_added: 5.0.0
ca_path:
description:
@@ -137,11 +137,12 @@ options:
The community name is checked by the remote system to which the traps are sent.
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author: "Felix Stephen (@felixs88)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+    - This module supports both IPv4 and IPv6 addresses for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -149,24 +150,24 @@ EXAMPLES = """
---
- name: Configure the iDRAC services attributes
dellemc.openmanage.dellemc_configure_idrac_services:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
- enable_web_server: "Enabled"
- http_port: 80
- https_port: 443
- ssl_encryption: "Auto_Negotiate"
- tls_protocol: "TLS_1_2_Only"
- timeout: "1800"
- snmp_enable: "Enabled"
- snmp_protocol: "SNMPv3"
- community_name: "public"
- alert_port: 162
- discovery_port: 161
- trap_format: "SNMPv3"
- ipmi_lan:
- community_name: "public"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_web_server: "Enabled"
+ http_port: 80
+ https_port: 443
+ ssl_encryption: "Auto_Negotiate"
+ tls_protocol: "TLS_1_2_Only"
+ timeout: "1800"
+ snmp_enable: "Enabled"
+ snmp_protocol: "SNMPv3"
+ community_name: "public"
+ alert_port: 162
+ discovery_port: 161
+ trap_format: "SNMPv3"
+ ipmi_lan:
+ community_name: "public"
"""
RETURN = r'''
@@ -234,7 +235,6 @@ try:
AgentEnable_SNMPTypes,
SNMPProtocol_SNMPTypes)
from omsdk.sdkfile import file_share_manager
- from omsdk.sdkcreds import UserCredentials
except ImportError:
pass
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_firmware_inventory.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_firmware_inventory.py
deleted file mode 100644
index d667c916e..000000000
--- a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_firmware_inventory.py
+++ /dev/null
@@ -1,148 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = """
----
-module: dellemc_get_firmware_inventory
-short_description: Get Firmware Inventory
-version_added: "1.0.0"
-deprecated:
- removed_at_date: "2023-01-15"
- why: Replaced with M(dellemc.openmanage.idrac_firmware_info).
- alternative: Use M(dellemc.openmanage.idrac_firmware_info) instead.
- removed_from_collection: dellemc.openmanage
-description: Get Firmware Inventory.
-extends_documentation_fragment:
- - dellemc.openmanage.idrac_auth_options
-
-requirements:
- - "omsdk >= 1.2.488"
- - "python >= 3.8.6"
-author: "Rajeev Arakkal (@rajeevarakkal)"
-notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
- - This module supports C(check_mode).
-"""
-
-EXAMPLES = """
----
-- name: Get Installed Firmware Inventory
- dellemc.openmanage.dellemc_get_firmware_inventory:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
-"""
-
-RETURNS = """
-ansible_facts:
- description: Displays components and their firmware versions. Also, list of the firmware
- dictionaries (one dictionary per firmware).
- returned: success
- type: complex
- sample: {
- [
- {
- "BuildNumber": "0",
- "Classifications": "10",
- "ComponentID": "101100",
- "ComponentType": "FRMW",
- "DeviceID": null,
- "ElementName": "Power Supply.Slot.1",
- "FQDD": "PSU.Slot.1",
- "IdentityInfoType": "OrgID:ComponentType:ComponentID",
- "IdentityInfoValue": "DCIM:firmware:101100",
- "InstallationDate": "2018-01-18T07:25:08Z",
- "InstanceID": "DCIM:INSTALLED#0x15__PSU.Slot.1",
- "IsEntity": "true",
- "Key": "DCIM:INSTALLED#0x15__PSU.Slot.1",
- "MajorVersion": "0",
- "MinorVersion": "1",
- "RevisionNumber": "7",
- "RevisionString": null,
- "Status": "Installed",
- "SubDeviceID": null,
- "SubVendorID": null,
- "Updateable": "true",
- "VendorID": null,
- "VersionString": "00.1D.7D",
- "impactsTPMmeasurements": "false"
- }
- ]
- }
-"""
-
-
-import traceback
-from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
-from ansible.module_utils.basic import AnsibleModule
-try:
- from omsdk.sdkfile import LocalFile
- from omsdk.catalog.sdkupdatemgr import UpdateManager
- from omdrivers.helpers.iDRAC.UpdateHelper import UpdateHelper
- HAS_OMSDK = True
-except ImportError:
- HAS_OMSDK = False
-
-
-def run_get_firmware_inventory(idrac, module):
- """
- Get Firmware Inventory
- Keyword arguments:
- idrac -- iDRAC handle
- module -- Ansible module
- """
-
- msg = {}
- # msg['changed'] = False
- msg['failed'] = False
- msg['msg'] = {}
- error = False
-
- try:
- # idrac.use_redfish = True
- msg['msg'] = idrac.update_mgr.InstalledFirmware
- if "Status" in msg['msg']:
- if msg['msg']['Status'] != "Success":
- msg['failed'] = True
-
- except Exception as err:
- error = True
- msg['msg'] = "Error: %s" % str(err)
- msg['exception'] = traceback.format_exc()
- msg['failed'] = True
-
- return msg, error
-
-
-# Main
-def main():
- module = AnsibleModule(
- argument_spec=idrac_auth_params,
- supports_check_mode=True)
-
- try:
- with iDRACConnection(module.params) as idrac:
- msg, err = run_get_firmware_inventory(idrac, module)
- except (ImportError, ValueError, RuntimeError) as e:
- module.fail_json(msg=str(e))
-
- if err:
- module.fail_json(**msg)
- module.exit_json(ansible_facts={idrac.ipaddr: {'Firmware Inventory': msg['msg']}})
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_system_inventory.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_system_inventory.py
deleted file mode 100644
index e6a2d9eaf..000000000
--- a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_system_inventory.py
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = """
----
-module: dellemc_get_system_inventory
-short_description: Get the PowerEdge Server System Inventory
-version_added: "1.0.0"
-deprecated:
- removed_at_date: "2023-01-15"
- why: Replaced with M(dellemc.openmanage.idrac_system_info).
- alternative: Use M(dellemc.openmanage.idrac_system_info) instead.
- removed_from_collection: dellemc.openmanage
-description:
- - Get the PowerEdge Server System Inventory.
-extends_documentation_fragment:
- - dellemc.openmanage.idrac_auth_options
-
-requirements:
- - "omsdk >= 1.2.488"
- - "python >= 3.8.6"
-author: "Rajeev Arakkal (@rajeevarakkal)"
-notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
- - This module supports C(check_mode).
-"""
-
-EXAMPLES = """
----
-- name: Get System Inventory
- dellemc.openmanage.dellemc_get_system_inventory:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
-"""
-
-RETURNS = """
-ansible_facts:
- description: Displays the Dell EMC PowerEdge Server System Inventory.
- returned: success
- type: complex
- sample: {
- "SystemInventory": {
- "BIOS": [
- {
- "BIOSReleaseDate": "10/19/2017",
- "FQDD": "BIOS.Setup.1-1",
- "InstanceID": "DCIM:INSTALLED#741__BIOS.Setup.00",
- "Key": "DCIM:INSTALLED#741__BIOS.Setup.00",
- "SMBIOSPresent": "True",
- "VersionString": "1.2.11"
- }
- ],
- "CPU": [
- {
- "CPUFamily": "Intel(R) Xeon(TM)",
- "Characteristics": "64-bit capable",
- "CurrentClockSpeed": "2.3 GHz",
- "DeviceDescription": "CPU 1",
- "ExecuteDisabledCapable": "Yes",
- }
- ]
- }
-}
-msg:
- description: Details of the Error occurred.
- returned: on error
- type: dict
- sample: {
- "error": {
- "code": "Base.1.0.GeneralError",
- "message": "A general error has occurred. See ExtendedInfo for more information.",
- "@Message.ExtendedInfo": [
- {
- "MessageId": "GEN1234",
- "RelatedProperties": [],
- "Message": "Unable to process the request because an error occurred.",
- "MessageArgs": [],
- "Severity": "Critical",
- "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
- }
- ]
- }
- }
-"""
-
-
-from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
-from ansible.module_utils.basic import AnsibleModule
-
-
-# Get System Inventory
-def run_get_system_inventory(idrac, module):
- msg = {}
- msg['changed'] = False
- msg['failed'] = False
- err = False
-
- try:
- # idrac.use_redfish = True
- idrac.get_entityjson()
- msg['msg'] = idrac.get_json_device()
- except Exception as e:
- err = True
- msg['msg'] = "Error: %s" % str(e)
- msg['failed'] = True
- return msg, err
-
-
-# Main
-def main():
- module = AnsibleModule(
- argument_spec=idrac_auth_params,
- supports_check_mode=True)
-
- try:
- with iDRACConnection(module.params) as idrac:
- msg, err = run_get_system_inventory(idrac, module)
- except (ImportError, ValueError, RuntimeError) as e:
- module.fail_json(msg=str(e))
-
- if err:
- module.fail_json(**msg)
- module.exit_json(ansible_facts={idrac.ipaddr: {'SystemInventory': msg['msg']}})
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py
index eec09c1c8..7762bc0b9 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -61,11 +61,12 @@ options:
default: Enabled
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author: "Felix Stephen (@felixs88)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+    - This module supports both IPv4 and IPv6 addresses for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -73,11 +74,11 @@ EXAMPLES = """
---
- name: Set up iDRAC LC Attributes
dellemc.openmanage.dellemc_idrac_lc_attributes:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
- csior: "Enabled"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ csior: "Enabled"
"""
RETURN = r'''
@@ -141,7 +142,6 @@ from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
try:
from omsdk.sdkfile import file_share_manager
- from omsdk.sdkcreds import UserCredentials
except ImportError:
pass
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py
index 01c915eae..e8021db18 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -123,10 +123,11 @@ options:
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author: "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+    - This module supports both IPv4 and IPv6 addresses for I(idrac_ip).
- This module supports C(check_mode).
'''
@@ -172,7 +173,7 @@ EXAMPLES = r'''
span_length: 3
span_depth: 1
drives:
- location: [7,3,5]
+ location: [7, 3, 5]
disk_cache_policy: "Disabled"
write_cache_policy: "WriteBack"
read_cache_policy: "NoReadAhead"
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_system_lockdown_mode.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_system_lockdown_mode.py
index 3be038e44..412e5a6f9 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_system_lockdown_mode.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_system_lockdown_mode.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -59,11 +59,12 @@ options:
choices: [Enabled, Disabled]
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author: "Felix Stephen (@felixs88)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+    - This module supports both IPv4 and IPv6 addresses for I(idrac_ip).
- This module does not support C(check_mode).
"""
@@ -71,11 +72,11 @@ EXAMPLES = """
---
- name: Check System Lockdown Mode
dellemc.openmanage.dellemc_system_lockdown_mode:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
- lockdown_mode: "Disabled"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ lockdown_mode: "Disabled"
"""
RETURN = r'''
@@ -144,7 +145,6 @@ from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
try:
from omsdk.sdkfile import file_share_manager
- from omsdk.sdkcreds import UserCredentials
except ImportError:
pass
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py
index c9c80854a..1b61b3ebe 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -32,7 +32,7 @@ options:
To view the list of attributes in Attribute Registry for iDRAC9 and above,
see, U(https://I(idrac_ip)/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1)
and U(https://I(idrac_ip)/redfish/v1/Registries/ManagerAttributeRegistry)."
- - "For iDRAC7 and iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
+ - "For iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName>
(for Example, 'SNMP.1#AgentCommunity') then the equivalent attribute name for Redfish is
<GroupName>.<Instance>.<AttributeName> (for Example, 'SNMP.1.AgentCommunity')."
@@ -43,7 +43,7 @@ options:
part of the Integrated Dell Remote Access Controller Attribute Registry. To view the list of attributes in Attribute Registry for iDRAC9 and above,
see, U(https://I(idrac_ip)/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1)
and U(https://I(idrac_ip)/redfish/v1/Registries/ManagerAttributeRegistry)."
- - "For iDRAC7 and iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
+ - "For iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName>
(for Example, 'ThermalSettings.1#ThermalProfile') then the equivalent attribute name for Redfish is
<GroupName>.<Instance>.<AttributeName> (for Example, 'ThermalSettings.1.ThermalProfile')."
@@ -54,7 +54,7 @@ options:
part of the Integrated Dell Remote Access Controller Attribute Registry.To view the list of attributes in Attribute Registry for iDRAC9 and above,
see, U(https://I(idrac_ip)/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1)
and U(https://I(idrac_ip)/redfish/v1/Registries/ManagerAttributeRegistry)."
- - "For iDRAC7 and iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
+ - "For iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName>
(for Example, 'LCAttributes.1#AutoUpdate') then the equivalent attribute name for Redfish is
<GroupName>.<Instance>.<AttributeName> (for Example, 'LCAttributes.1.AutoUpdate')."
@@ -69,7 +69,7 @@ author:
notes:
- Run this module from a system that has direct access to Dell iDRAC.
- This module supports C(check_mode).
- - For iDRAC7 and iDRAC8 based servers, the value provided for the attributes are not be validated.
+    - For iDRAC8 based servers, the values provided for the attributes are not validated.
Ensure appropriate values are passed.
'''
@@ -188,7 +188,7 @@ EXAMPLES = """
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
idrac_attributes:
- Time.1.TimeZone: CST6CDT
+ Time.1.Timezone: CST6CDT
NTPConfigGroup.1.NTPEnable: Enabled
NTPConfigGroup.1.NTP1: 192.168.0.5
NTPConfigGroup.1.NTP2: 192.168.0.6
@@ -260,9 +260,8 @@ error_info:
import json
import re
-from ssl import SSLError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
-from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError
from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import get_manager_res_id
from ansible.module_utils.basic import AnsibleModule
@@ -285,7 +284,7 @@ def xml_data_conversion(attrbite, fqdd=None):
attr = ""
json_data = {}
for k, v in attrbite.items():
- key = re.sub(r"(?<=\d)\.", "#", k)
+ key = re.sub(r"\.(?!\d)", "#", k)
attr += '<Attribute Name="{0}">{1}</Attribute>'.format(key, v)
json_data[key] = str(v)
root = component.format(fqdd, attr)
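The new pattern matters for attribute names whose group or instance token ends in a digit; a short worked comparison with an illustrative attribute name:

import re

name = "IPv4.1.Address"
re.sub(r"(?<=\d)\.", "#", name)   # old pattern -> 'IPv4#1#Address' (the group/instance dot is corrupted)
re.sub(r"\.(?!\d)", "#", name)    # new pattern -> 'IPv4.1#Address' (only the dot before the attribute name becomes '#')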
@@ -510,13 +509,13 @@ def main():
res_id = get_manager_res_id(idrac)
diff, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr = fetch_idrac_uri_attr(idrac, module, res_id)
process_check_mode(module, diff)
- resp = update_idrac_attributes(idrac, module, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr)
+ update_idrac_attributes(idrac, module, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr)
module.exit_json(msg=SUCCESS_MSG, changed=True)
except HTTPError as err:
module.fail_json(msg=str(err), error_info=json.load(err))
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
- except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
+ except (IOError, ValueError, TypeError, ConnectionError, AttributeError, IndexError, KeyError) as err:
module.fail_json(msg=str(err), error_info=json.load(err))
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py
index 8cd9c5e7b..aba65f3f6 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py
@@ -3,8 +3,8 @@
#
# Dell OpenManage Ansible Modules
-# Version 6.2.0
-# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 7.6.0
+# Copyright (C) 2018-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -67,12 +67,12 @@ options:
- "The format is YYYY-MM-DDThh:mm:ss<offset>"
- "<offset> is the time offset from UTC that the current timezone set in
iDRAC in the format: +05:30 for IST."
- required: True
+ required: true
duration:
type: int
description:
- The duration in seconds for the maintenance window.
- required: True
+ required: true
attributes:
type: dict
description:
@@ -124,19 +124,21 @@ options:
type: int
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
default: 1200
requirements:
- "omsdk >= 1.2.490"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Anooja Vardhineni (@anooja-vardhineni)"
- "Jagadeesh N V (@jagadeeshnv)"
+ - "Shivam Sharma (@shivam-sharma)"
notes:
- omsdk is required to be installed only for I(boot_sources) operation.
- This module requires 'Administrator' privilege for I(idrac_user).
- Run this module from a system that has direct access to Dell iDRAC.
+    - This module supports both IPv4 and IPv6 addresses for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -144,20 +146,20 @@ EXAMPLES = """
---
- name: Configure generic attributes of the BIOS
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
attributes:
- BootMode : "Bios"
+ BootMode: "Bios"
OneTimeBootMode: "Enabled"
BootSeqRetry: "Enabled"
- name: Configure PXE generic attributes
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
attributes:
PxeDev1EnDis: "Enabled"
@@ -169,82 +171,82 @@ EXAMPLES = """
- name: Configure BIOS attributes at Maintenance window
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
apply_time: AtMaintenanceWindowStart
maintenance_window:
start_time: "2022-09-30T05:15:40-05:00"
duration: 600
attributes:
- BootMode : "Bios"
+ BootMode: "Bios"
OneTimeBootMode: "Enabled"
BootSeqRetry: "Enabled"
- name: Clear pending BIOS attributes
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
- clear_pending: yes
+ clear_pending: true
- name: Reset BIOS attributes to default settings.
dellemc.openmanage.idrac_bios:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_pwd }}"
- validate_certs: False
- reset_bios: yes
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ validate_certs: false
+ reset_bios: true
- name: Configure boot sources
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
boot_sources:
- - Name : "NIC.Integrated.1-2-3"
- Enabled : true
- Index : 0
+ - Name: "NIC.Integrated.1-2-3"
+ Enabled: true
+ Index: 0
- name: Configure multiple boot sources
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
boot_sources:
- - Name : "NIC.Integrated.1-1-1"
- Enabled : true
- Index : 0
- - Name : "NIC.Integrated.2-2-2"
- Enabled : true
- Index : 1
- - Name : "NIC.Integrated.3-3-3"
- Enabled : true
- Index : 2
+ - Name: "NIC.Integrated.1-1-1"
+ Enabled: true
+ Index: 0
+ - Name: "NIC.Integrated.2-2-2"
+ Enabled: true
+ Index: 1
+ - Name: "NIC.Integrated.3-3-3"
+ Enabled: true
+ Index: 2
- name: Configure boot sources - Enabling
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
boot_sources:
- - Name : "NIC.Integrated.1-1-1"
- Enabled : true
+ - Name: "NIC.Integrated.1-1-1"
+ Enabled: true
- name: Configure boot sources - Index
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
boot_sources:
- - Name : "NIC.Integrated.1-1-1"
- Index : 0
+ - Name: "NIC.Integrated.1-1-1"
+ Index: 0
"""
RETURN = """
@@ -343,6 +345,7 @@ UNSUPPORTED_APPLY_TIME = "Apply time {0} is not supported."
MAINTENANCE_OFFSET = "The maintenance time must be post-fixed with local offset to {0}."
MAINTENANCE_TIME = "The specified maintenance time window occurs in the past, " \
"provide a future time to schedule the maintenance window."
+NEGATIVE_TIMEOUT_MESSAGE = "The parameter job_wait_timeout value cannot be negative or zero."
POWER_CHECK_RETRIES = 30
POWER_CHECK_INTERVAL = 10
@@ -561,7 +564,7 @@ def track_log_entry(redfish_obj):
else:
# msg = "{0}{1}".format(BIOS_RESET_TRIGGERED, "LOOPOVER")
msg = BIOS_RESET_TRIGGERED
- except Exception as ex:
+ except Exception:
# msg = "{0}{1}".format(BIOS_RESET_TRIGGERED, str(ex))
msg = BIOS_RESET_TRIGGERED
return msg
@@ -573,7 +576,7 @@ def reset_bios(module, redfish_obj):
module.exit_json(status_msg=BIOS_RESET_PENDING, failed=True)
if module.check_mode:
module.exit_json(status_msg=CHANGES_MSG, changed=True)
- resp = redfish_obj.invoke_request(RESET_BIOS_DEFAULT, "POST", data="{}", dump=True)
+ redfish_obj.invoke_request(RESET_BIOS_DEFAULT, "POST", data="{}", dump=True)
reset_success = reset_host(module, redfish_obj)
if not reset_success:
module.exit_json(failed=True, status_msg="{0} {1}".format(RESET_TRIGGERRED, HOST_RESTART_FAILED))
@@ -598,7 +601,7 @@ def clear_pending_bios(module, redfish_obj):
module.exit_json(status_msg=SUCCESS_CLEAR, changed=True)
if module.check_mode:
module.exit_json(status_msg=CHANGES_MSG, changed=True)
- resp = redfish_obj.invoke_request(CLEAR_PENDING_URI, "POST", data="{}", dump=False)
+ redfish_obj.invoke_request(CLEAR_PENDING_URI, "POST", data="{}", dump=False)
module.exit_json(status_msg=SUCCESS_CLEAR, changed=True)
@@ -698,7 +701,7 @@ def apply_attributes(module, redfish_obj, pending, rf_settings):
payload["@Redfish.SettingsApplyTime"] = rf_set
resp = redfish_obj.invoke_request(BIOS_SETTINGS, "PATCH", data=payload)
if rf_set:
- tmp_resp = redfish_obj.invoke_request(resp.headers["Location"], "GET")
+ redfish_obj.invoke_request(resp.headers["Location"], "GET")
job_id = resp.headers["Location"].split("/")[-1]
else:
if aplytm == "Immediate":
@@ -758,6 +761,11 @@ def attributes_config(module, redfish_obj):
job_id=job_id, changed=True)
+def validate_negative_job_time_out(module):
+ if module.params.get("job_wait_timeout") <= 0:
+ module.fail_json(msg=NEGATIVE_TIMEOUT_MESSAGE)
+
+
def main():
specs = {
"share_name": {"type": 'str'},
@@ -785,6 +793,7 @@ def main():
required_if=[["apply_time", "AtMaintenanceWindowStart", ("maintenance_window",)],
["apply_time", "InMaintenanceWindowOnReset", ("maintenance_window",)]],
supports_check_mode=True)
+ validate_negative_job_time_out(module)
try:
msg = {}
if module.params.get("boot_sources") is not None:
@@ -810,7 +819,8 @@ def main():
except HTTPError as err:
module.fail_json(msg=str(err), error_info=json.load(err))
except URLError as err:
- module.exit_json(msg=str(err), unreachable=True)
+ message = err.reason if err.reason else str(err)
+ module.exit_json(msg=message, unreachable=True)
except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
ImportError, ValueError, TypeError) as e:
module.fail_json(msg=str(e))
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_boot.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_boot.py
index ad563c5ce..1e28ef30d 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_boot.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_boot.py
@@ -3,8 +3,8 @@
#
# Dell OpenManage Ansible Modules
-# Version 6.1.0
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 8.0.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -69,7 +69,7 @@ options:
- C(continuous) The system boots to the target specified in the I(boot_source_override_target)
until this property is set to Disabled.
- The state is set to C(once) for the one-time boot override and C(continuous) for the
- remain-active-until—canceled override. If the state is set C(once), the value is reset
+ remain-active-until—canceled override. If the state is set C(once) or C(continuous), the value is reset
to C(disabled) after the I(boot_source_override_target) actions have completed successfully.
- Changes to this options do not alter the BIOS persistent boot order configuration.
- This is mutually exclusive with I(boot_options).
@@ -101,8 +101,8 @@ options:
type: str
description:
- C(none) Host system is not rebooted and I(job_wait) is not applicable.
- - C(force_reset) Forcefully reboot the Host system.
- - C(graceful_reset) Gracefully reboot the Host system.
+ - C(force_restart) Forcefully reboot the Host system.
+ - C(graceful_restart) Gracefully reboot the Host system.
choices: [graceful_restart, force_restart, none]
default: graceful_restart
job_wait:
@@ -115,7 +115,7 @@ options:
type: int
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
default: 900
resource_id:
type: str
@@ -413,7 +413,7 @@ def apply_boot_settings(module, idrac, payload, res_id):
def configure_boot_settings(module, idrac, res_id):
- job_resp, diff_change, payload = {}, [], {"Boot": {}}
+ job_resp, payload = {}, {"Boot": {}}
boot_order = module.params.get("boot_order")
override_mode = module.params.get("boot_source_override_mode")
override_enabled = module.params.get("boot_source_override_enabled")
@@ -457,7 +457,7 @@ def configure_boot_settings(module, idrac, res_id):
def configure_idrac_boot(module, idrac, res_id):
boot_options = module.params.get("boot_options")
- inv_boot_options, diff_change, payload, job_resp, boot_attr = [], [], {}, {}, {}
+ inv_boot_options, diff_change, payload, job_resp = [], [], {}, {}
if boot_options is not None:
boot_option_data = get_existing_boot_options(idrac, res_id)
for each in boot_options:
@@ -551,8 +551,8 @@ def main():
except HTTPError as err:
if err.code == 401:
module.fail_json(msg=AUTH_ERROR_MSG.format(module.params["idrac_ip"]))
- module.fail_json(msg=str(err), error_info=json.load(err))
- except URLError as err:
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
+ except URLError:
module.exit_json(msg=AUTH_ERROR_MSG.format(module.params["idrac_ip"]), unreachable=True)
except (ImportError, ValueError, RuntimeError, SSLValidationError,
ConnectionError, KeyError, TypeError, IndexError) as e:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py
index f5471a3ad..a429c639b 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.5.0
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.6.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -14,7 +14,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: idrac_certificates
short_description: Configure certificates for iDRAC
@@ -32,17 +32,19 @@ options:
- C(export), export the certificate. This requires I(certificate_path).
- C(reset), reset the certificate to default settings. This is applicable only for C(HTTPS).
type: str
- choices: ['import', 'export', 'generate_csr', 'reset']
+ choices: [import, export, generate_csr, reset]
default: 'generate_csr'
certificate_type:
description:
- Type of the iDRAC certificate.
- C(HTTPS) The Dell self-signed SSL certificate.
- C(CA) Certificate Authority(CA) signed SSL certificate.
- - C(CSC) The custom signed SSL certificate.
+ - C(CUSTOMCERTIFICATE) The custom PKCS12 certificate and private key. Export of custom certificate is supported only on iDRAC firmware version 7.00.00.00
+ and above.
+ - C(CSC) The custom signing SSL certificate.
- C(CLIENT_TRUST_CERTIFICATE) Client trust certificate.
type: str
- choices: ['HTTPS', 'CA', 'CSC', 'CLIENT_TRUST_CERTIFICATE']
+ choices: [HTTPS, CA, CUSTOMCERTIFICATE, CSC, CLIENT_TRUST_CERTIFICATE]
default: 'HTTPS'
certificate_path:
description:
@@ -52,6 +54,13 @@ options:
passphrase:
description: The passphrase string if the certificate to be imported is passphrase protected.
type: str
+ ssl_key:
+ description:
+ - Absolute path of the private or SSL key file.
+ - This is applicable only when I(command) is C(import) and I(certificate_type) is C(HTTPS).
+ - Uploading the SSL key to iDRAC is supported on firmware version 6.00.02.00 and above.
+ type: path
+ version_added: 8.6.0
cert_params:
description: Certificate parameters to generate signing request.
type: dict
@@ -59,31 +68,30 @@ options:
common_name:
description: The common name of the certificate.
type: str
- required: True
+ required: true
organization_unit:
description: The name associated with an organizational unit. For example department name.
type: str
- required: True
+ required: true
locality_name:
description: The city or other location where the entity applying for certification is located.
type: str
- required: True
+ required: true
state_name:
description: The state where the entity applying for certification is located.
type: str
- required: True
+ required: true
country_code:
description: The country code of the country where the entity applying for certification is located.
type: str
- required: True
+ required: true
email_address:
description: The email associated with the CSR.
type: str
- required: True
organization_name:
description: The name associated with an organization.
type: str
- required: True
+ required: true
subject_alt_name:
description: The alternative domain names associated with the request.
type: list
@@ -97,24 +105,27 @@ options:
- To reset the iDRAC after the certificate operation.
- This is applicable when I(command) is C(import) or C(reset).
type: bool
- default: True
+ default: true
wait:
description:
- Maximum wait time for iDRAC to start after the reset, in seconds.
- - This is applicable when I(command) is C(import) or C(reset) and I(reset) is C(True).
+ - This is applicable when I(command) is C(import) or C(reset) and I(reset) is C(true).
type: int
default: 300
requirements:
- "python >= 3.8.6"
author:
- "Jagadeesh N V(@jagadeeshnv)"
+ - "Rajshekar P(@rajshekarp87)"
+ - "Kristian Lamb V(@kristian_lamb)"
notes:
- - The certificate operations are supported on iDRAC firmware 5.10.10.00 and above.
+ - The certificate operations are supported on iDRAC firmware version 6.10.80.00 and above.
- Run this module from a system that has direct access to Dell iDRAC.
- This module supports C(check_mode).
-'''
+ - This module supports IPv4 and IPv6 addresses.
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
---
- name: Generate HTTPS certificate signing request
dellemc.openmanage.idrac_certificates:
@@ -146,6 +157,17 @@ EXAMPLES = r'''
certificate_type: "HTTPS"
certificate_path: "/path/to/cert.pem"
+- name: Import an HTTPS certificate along with its private key.
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "import"
+ certificate_type: "HTTPS"
+ certificate_path: "/path/to/cert.pem"
+ ssl_key: "/path/to/private_key.pem"
+
- name: Export a HTTPS certificate.
dellemc.openmanage.idrac_certificates:
idrac_ip: "192.168.0.1"
@@ -166,6 +188,17 @@ EXAMPLES = r'''
certificate_type: "CSC"
certificate_path: "/path/to/cert.pem"
+- name: Import a custom certificate with a passphrase.
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ command: "import"
+ certificate_type: "CUSTOMCERTIFICATE"
+ certificate_path: "/path/to/idrac_cert.p12"
+ passphrase: "cert_passphrase"
+ reset: false
+
- name: Export a Client trust certificate.
dellemc.openmanage.idrac_certificates:
idrac_ip: "192.168.0.1"
@@ -175,7 +208,7 @@ EXAMPLES = r'''
command: "export"
certificate_type: "CLIENT_TRUST_CERTIFICATE"
certificate_path: "/home/omam/mycert_dir"
-'''
+"""
RETURN = r'''
---
@@ -183,7 +216,7 @@ msg:
type: str
description: Status of the certificate configuration operation.
returned: always
- sample: "Successfully performed the operation generate_csr."
+ sample: "Successfully performed the 'generate_csr' certificate operation."
certificate_path:
type: str
description: The csr or exported certificate file path
@@ -221,43 +254,50 @@ from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import reset_idrac
-NOT_SUPPORTED_ACTION = "Certificate {op} not supported for the specified certificate type {certype}."
-SUCCESS_MSG = "Successfully performed the '{command}' operation."
+IMPORT_SSL_CERTIFICATE = "#DelliDRACCardService.ImportSSLCertificate"
+EXPORT_SSL_CERTIFICATE = "#DelliDRACCardService.ExportSSLCertificate"
+IDRAC_CARD_SERVICE_ACTION_URI = "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions"
+
+NOT_SUPPORTED_ACTION = "Certificate '{operation}' not supported for the specified certificate type '{cert_type}'."
+SUCCESS_MSG = "Successfully performed the '{command}' certificate operation."
+SUCCESS_MSG_SSL = "Successfully performed the SSL key upload and '{command}' certificate operation."
NO_CHANGES_MSG = "No changes found to be applied."
CHANGES_MSG = "Changes found to be applied."
+WAIT_NEGATIVE_OR_ZERO_MSG = "The value for the `wait` parameter cannot be negative or zero."
SYSTEM_ID = "System.Embedded.1"
MANAGER_ID = "iDRAC.Embedded.1"
-ACTIONS_PFIX = "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService."
+ACTIONS_PFIX = f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService."
SYSTEMS_URI = "/redfish/v1/Systems"
MANAGERS_URI = "/redfish/v1/Managers"
-IDRAC_SERVICE = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService"
+IDRAC_SERVICE = "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService"
CSR_SSL = "/redfish/v1/CertificateService/Actions/CertificateService.GenerateCSR"
-IMPORT_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.ImportSSLCertificate"
-EXPORT_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.ExportSSLCertificate"
-RESET_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.SSLResetCfg"
+IMPORT_SSL = f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.ImportSSLCertificate"
+UPLOAD_SSL = f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.UploadSSLKey"
+EXPORT_SSL = f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.ExportSSLCertificate"
+RESET_SSL = f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.SSLResetCfg"
IDRAC_RESET = "/redfish/v1/Managers/{res_id}/Actions/Manager.Reset"
idrac_service_actions = {
- "#DelliDRACCardService.DeleteCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.DeleteCertificate",
- "#DelliDRACCardService.ExportCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ExportCertificate",
- "#DelliDRACCardService.ExportSSLCertificate": EXPORT_SSL,
+ "#DelliDRACCardService.DeleteCertificate": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.DeleteCertificate",
+ "#DelliDRACCardService.ExportCertificate": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.ExportCertificate",
+ EXPORT_SSL_CERTIFICATE: EXPORT_SSL,
"#DelliDRACCardService.FactoryIdentityCertificateGenerateCSR":
- "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityCertificateGenerateCSR",
+ f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.FactoryIdentityCertificateGenerateCSR",
"#DelliDRACCardService.FactoryIdentityExportCertificate":
- "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityExportCertificate",
+ f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.FactoryIdentityExportCertificate",
"#DelliDRACCardService.FactoryIdentityImportCertificate":
- "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityImportCertificate",
- "#DelliDRACCardService.GenerateSEKMCSR": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.GenerateSEKMCSR",
- "#DelliDRACCardService.ImportCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ImportCertificate",
- "#DelliDRACCardService.ImportSSLCertificate": IMPORT_SSL,
- "#DelliDRACCardService.SSLResetCfg": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.SSLResetCfg",
- "#DelliDRACCardService.iDRACReset": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.iDRACReset"
+ f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.FactoryIdentityImportCertificate",
+ "#DelliDRACCardService.GenerateSEKMCSR": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.GenerateSEKMCSR",
+ "#DelliDRACCardService.ImportCertificate": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.ImportCertificate",
+ IMPORT_SSL_CERTIFICATE: IMPORT_SSL,
+ "#DelliDRACCardService.UploadSSLKey": UPLOAD_SSL,
+ "#DelliDRACCardService.SSLResetCfg": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.SSLResetCfg",
+ "#DelliDRACCardService.iDRACReset": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.iDRACReset"
}
rfish_cert_coll = {'Server': {
"@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/NetworkProtocol/HTTPS/Certificates"
}}
-out_mapper = {}
out_file_path = {"CSRString": 'certificate_path',
"CertificateFile": 'certificate_path'}
changed_map = {"generate_csr": False, "import": True, "export": False, "reset": True}
@@ -271,91 +311,111 @@ csr_transform = {"common_name": "CommonName",
"organization_name": "Organization",
"subject_alt_name": 'AlternativeNames'}
action_url_map = {"generate_csr": {},
- "import": {'Server': "#DelliDRACCardService.ImportSSLCertificate",
- 'CA': "#DelliDRACCardService.ImportSSLCertificate",
- 'CSC': "#DelliDRACCardService.ImportSSLCertificate",
- 'ClientTrustCertificate': "#DelliDRACCardService.ImportSSLCertificate"},
- "export": {'Server': "#DelliDRACCardService.ExportSSLCertificate",
- 'CA': "#DelliDRACCardService.ExportSSLCertificate",
- 'CSC': "#DelliDRACCardService.ExportSSLCertificate",
- 'ClientTrustCertificate': "#DelliDRACCardService.ExportSSLCertificate"},
+ "import": {'Server': IMPORT_SSL_CERTIFICATE,
+ 'CA': IMPORT_SSL_CERTIFICATE,
+ 'CustomCertificate': IMPORT_SSL_CERTIFICATE,
+ 'CSC': IMPORT_SSL_CERTIFICATE,
+ 'ClientTrustCertificate': IMPORT_SSL_CERTIFICATE},
+ "export": {'Server': EXPORT_SSL_CERTIFICATE,
+ 'CA': EXPORT_SSL_CERTIFICATE,
+ 'CustomCertificate': EXPORT_SSL_CERTIFICATE,
+ 'CSC': EXPORT_SSL_CERTIFICATE,
+ 'ClientTrustCertificate': EXPORT_SSL_CERTIFICATE},
"reset": {'Server': "#DelliDRACCardService.SSLResetCfg"}}
dflt_url_map = {"generate_csr": {'Server': CSR_SSL},
"import": {'Server': IMPORT_SSL,
'CA': IMPORT_SSL,
+ 'CUSTOMCERTIFICATE': IMPORT_SSL,
'CSC': IMPORT_SSL,
'ClientTrustCertificate': IMPORT_SSL},
"export": {'Server': EXPORT_SSL,
'CA': EXPORT_SSL,
+ 'CUSTOMCERTIFICATE': EXPORT_SSL,
'CSC': EXPORT_SSL,
'ClientTrustCertificate': EXPORT_SSL},
"reset": {'Server': RESET_SSL}}
-certype_map = {'HTTPS': "Server", 'CA': "CA", 'CSC': "CSC",
+certype_map = {'HTTPS': "Server", 'CA': "CA", 'CUSTOMCERTIFICATE': "CustomCertificate", 'CSC': "CSC",
'CLIENT_TRUST_CERTIFICATE': "ClientTrustCertificate"}
-def get_ssl_payload(module, op, certype):
+def get_ssl_payload(module, operation, cert_type):
payload = {}
method = 'POST'
- if op == 'import':
- payload["CertificateType"] = certype
- if module.params.get('passphrase'):
- payload['Passphrase'] = module.params.get('passphrase')
- fpath = module.params.get('certificate_path')
- try:
- if str(fpath).lower().endswith('.p12') or str(fpath).lower().endswith(
- '.pfx'): # Linux generates .p12 Windows .pfx
- with open(fpath, 'rb') as cert:
- cert_content = cert.read()
- cert_file = base64.encodebytes(cert_content).decode('ascii')
- else:
- with open(fpath, "r") as cert:
- cert_file = cert.read()
- except OSError as file_err:
- module.exit_json(msg=str(file_err), failed=True)
- payload['SSLCertificateFile'] = cert_file
- elif op == 'export':
- payload['SSLCertType'] = certype
- elif op == 'generate_csr':
- payload = {}
- cert_params = module.params.get("cert_params")
- for k, v in csr_transform.items():
- payload[v] = cert_params.get(k)
- if rfish_cert_coll.get(certype):
- payload["CertificateCollection"] = rfish_cert_coll.get(certype)
- elif op == 'reset':
- payload = "{}"
+
+ if operation == 'import':
+ payload = _build_import_payload(module, cert_type)
+ elif operation == 'export':
+ payload = {"SSLCertType": cert_type}
+ elif operation == 'generate_csr':
+ payload = _build_generate_csr_payload(module, cert_type)
+ elif operation == 'reset':
+ payload = '{}'
+
return payload, method
+def _build_import_payload(module, cert_type):
+ payload = {"CertificateType": cert_type}
+
+ if module.params.get('passphrase'):
+ payload['Passphrase'] = module.params.get('passphrase')
+
+ cert_path = module.params.get('certificate_path')
+ try:
+ if str(cert_path).lower().endswith('.p12') or str(cert_path).lower().endswith('.pfx'):
+ with open(cert_path, 'rb') as cert_file:
+ cert_content = cert_file.read()
+ cert_file_content = base64.encodebytes(cert_content).decode('ascii')
+ else:
+ with open(cert_path, "r") as cert_file:
+ cert_file_content = cert_file.read()
+ except OSError as file_error:
+ module.exit_json(msg=str(file_error), failed=True)
+
+ payload['SSLCertificateFile'] = cert_file_content
+ return payload
+
+
+def _build_generate_csr_payload(module, cert_type):
+ payload = {}
+ cert_params = module.params.get("cert_params")
+
+ for key, value in csr_transform.items():
+ if cert_params.get(key) is not None:
+ payload[value] = cert_params.get(key)
+
+ if rfish_cert_coll.get(cert_type):
+ payload["CertificateCollection"] = rfish_cert_coll.get(cert_type)
+
+ return payload
+
+
payload_map = {"Server": get_ssl_payload,
"CA": get_ssl_payload,
+ "CustomCertificate": get_ssl_payload,
"CSC": get_ssl_payload,
"ClientTrustCertificate": get_ssl_payload}
-def get_res_id(idrac, certype):
+def get_res_id(idrac, cert_type):
cert_map = {"Server": MANAGER_ID}
try:
- resp = idrac.invoke_request("GET", cert_map.get(certype, MANAGERS_URI))
+ resp = idrac.invoke_request(cert_map.get(cert_type, MANAGERS_URI), "GET")
membs = resp.json_data.get("Members")
res_uri = membs[0].get('@odata.id') # Getting the first item
res_id = res_uri.split("/")[-1]
except Exception:
- res_id = cert_map.get(certype, MANAGER_ID)
+ res_id = cert_map.get(cert_type, MANAGER_ID)
return res_id
def get_idrac_service(idrac, res_id):
srvc = IDRAC_SERVICE.format(res_id=res_id)
- try:
- resp = idrac.invoke_request('GET', "{0}/{1}".format(MANAGERS_URI, res_id))
- srvc_data = resp.json_data
- dell_srvc = srvc_data['Links']['Oem']['Dell']['DelliDRACCardService']
- srvc = dell_srvc.get("@odata.id", IDRAC_SERVICE.format(res_id=res_id))
- except Exception:
- srvc = IDRAC_SERVICE.format(res_id=res_id)
+ resp = idrac.invoke_request(f"{MANAGERS_URI}/{res_id}", 'GET')
+ srvc_data = resp.json_data
+ dell_srvc = srvc_data['Links']['Oem']['Dell']['DelliDRACCardService']
+ srvc = dell_srvc.get("@odata.id", IDRAC_SERVICE.format(res_id=res_id))
return srvc
@@ -365,45 +425,63 @@ def get_actions_map(idrac, idrac_service_uri):
resp = idrac.invoke_request(idrac_service_uri, 'GET')
srvc_data = resp.json_data
actions = dict((k, v.get('target')) for k, v in srvc_data.get('Actions').items())
- except Exception as exc:
+ except Exception:
actions = idrac_service_actions
return actions
-def get_cert_url(actions, op, certype, res_id):
- idrac_key = action_url_map.get(op).get(certype)
+def get_cert_url(actions, operation, cert_type, res_id):
+ idrac_key = action_url_map.get(operation).get(cert_type)
dynurl = actions.get(idrac_key)
if not dynurl:
- dynurl = dflt_url_map.get(op).get(certype)
+ dynurl = dflt_url_map.get(operation).get(cert_type)
if dynurl:
dynurl = dynurl.format(res_id=res_id)
return dynurl
-def certificate_action(module, idrac, actions, op, certype, res_id):
- cert_url = get_cert_url(actions, op, certype, res_id)
+def upload_ssl_key(module, idrac, actions, ssl_key, res_id):
+ if not os.path.exists(ssl_key) or os.path.isdir(ssl_key):
+ module.exit_json(msg=f"Unable to locate the SSL key file at {ssl_key}.", failed=True)
+
+ try:
+ with open(ssl_key, "r") as file:
+ scert_file = file.read()
+ except OSError as err:
+ module.exit_json(msg=str(err), failed=True)
+
+ if not module.check_mode:
+ upload_url = actions.get("#DelliDRACCardService.UploadSSLKey")
+ if not upload_url:
+            module.exit_json(msg="Upload of SSL key not supported", failed=True)
+
+        payload = {'SSLKeyString': scert_file}
+ idrac.invoke_request(upload_url.format(res_id=res_id), "POST", data=payload)
+
+
+def certificate_action(module, idrac, actions, operation, cert_type, res_id):
+ cert_url = get_cert_url(actions, operation, cert_type, res_id)
if not cert_url:
- module.exit_json(msg=NOT_SUPPORTED_ACTION.format(op=op, certype=module.params.get('certificate_type')))
- cert_payload, method = payload_map.get(certype)(module, op, certype)
- exit_certificates(module, idrac, cert_url, cert_payload, method, certype, res_id)
+ module.exit_json(msg=NOT_SUPPORTED_ACTION.format(operation=operation, cert_type=module.params.get('certificate_type')))
+ cert_payload, method = payload_map.get(cert_type)(module, operation, cert_type)
+ exit_certificates(module, idrac, cert_url, cert_payload, method, cert_type, res_id)
def write_to_file(module, cert_data, dkey):
- f_ext = {'HTTPS': ".pem", 'CA': ".pem", 'CSC': ".crt", 'CLIENT_TRUST_CERTIFICATE': ".crt"}
+    f_ext = {'HTTPS': ".pem", 'CA': ".pem", 'CUSTOMCERTIFICATE': ".crt", 'CSC': ".crt", 'CLIENT_TRUST_CERTIFICATE': ".crt"}
path = module.params.get('certificate_path')
if not (os.path.exists(path) or os.path.isdir(path)):
- module.exit_json(msg="Provided directory path '{0}' is not valid.".format(path), failed=True)
+ module.exit_json(msg=f"Provided directory path '{path}' is not valid.", failed=True)
if not os.access(path, os.W_OK):
- module.exit_json(msg="Provided directory path '{0}' is not writable. Please check if you "
- "have appropriate permissions.".format(path), failed=True)
+ module.exit_json(msg=f"Provided directory path '{path}' is not writable. Please check if you "
+ "have appropriate permissions.", failed=True)
d = datetime.now()
if module.params.get('command') == 'generate_csr':
ext = '.txt'
else:
ext = f_ext.get(module.params.get('certificate_type'))
- cert_file_name = "{0}_{1}{2}{3}_{4}{5}{6}_{7}{8}".format(
- module.params["idrac_ip"], d.date().year, d.date().month, d.date().day,
- d.time().hour, d.time().minute, d.time().second, module.params.get('certificate_type'), ext)
+ cert_file_name = f"{module.params['idrac_ip']}_{d.strftime('%Y%m%d_%H%M%S')}_{module.params.get('certificate_type')}{ext}"
file_name = os.path.join(path, cert_file_name)
write_data = cert_data.pop(dkey, None)
with open(file_name, "w") as fp:
@@ -412,48 +490,42 @@ def write_to_file(module, cert_data, dkey):
def format_output(module, cert_data):
- # cert_data = strip_substr_dict(cert_data, chkstr='@odata')
- result = {}
cp = cert_data.copy()
klist = cp.keys()
for k in klist:
if "message" in k.lower():
cert_data.pop(k, None)
- if k in out_mapper:
- cert_data[out_mapper.get(k)] = cert_data.pop(k, None)
if k in out_file_path:
write_to_file(module, cert_data, k)
- if result:
- cert_data.update({'result': result})
cert_data.pop("CertificateCollection", None)
return cert_data
-def get_export_data(idrac, certype, res_id):
+def get_export_data(idrac, cert_type, res_id):
try:
- resp = idrac.invoke_request(EXPORT_SSL.format(res_id=res_id), "POST", data={"SSLCertType": certype})
+ resp = idrac.invoke_request(EXPORT_SSL.format(res_id=res_id), "POST", data={"SSLCertType": cert_type})
cert_data = resp.json_data
except Exception:
cert_data = {"CertificateFile": ""}
return cert_data.get("CertificateFile")
-def exit_certificates(module, idrac, cert_url, cert_payload, method, certype, res_id):
+def exit_certificates(module, idrac, cert_url, cert_payload, method, cert_type, res_id):
cmd = module.params.get('command')
changed = changed_map.get(cmd)
reset = changed_map.get(cmd) and module.params.get('reset')
result = {"changed": changed}
reset_msg = ""
if changed:
- reset_msg = " Reset iDRAC to apply new certificate." \
- " Until iDRAC is reset, the old certificate will be active."
+ reset_msg = "Reset iDRAC to apply the new certificate." \
+ " Until the iDRAC is reset, the old certificate will remain active."
if module.params.get('command') == 'import':
- export_cert = get_export_data(idrac, certype, res_id)
+ export_cert = get_export_data(idrac, cert_type, res_id)
if cert_payload.get('SSLCertificateFile') in export_cert:
module.exit_json(msg=NO_CHANGES_MSG)
if module.check_mode and changed:
module.exit_json(msg=CHANGES_MSG, changed=changed)
- if module.params.get('command') == 'reset' and certype == "Server":
+ if module.params.get('command') == 'reset' and cert_type == "Server":
resp = idrac.invoke_request(cert_url, method, data=cert_payload, dump=False)
else:
resp = idrac.invoke_request(cert_url, method, data=cert_payload)
@@ -462,7 +534,10 @@ def exit_certificates(module, idrac, cert_url, cert_payload, method, certype, re
result.update(cert_output)
if reset:
reset, track_failed, reset_msg = reset_idrac(idrac, module.params.get('wait'), res_id)
- result['msg'] = "{0}{1}".format(SUCCESS_MSG.format(command=cmd), reset_msg)
+ if cmd == "import" and cert_type == "Server" and module.params.get('ssl_key'):
+ result['msg'] = "{0} {1}".format(SUCCESS_MSG_SSL.format(command=cmd), reset_msg)
+ else:
+ result['msg'] = "{0}{1}".format(SUCCESS_MSG.format(command=cmd), reset_msg)
module.exit_json(**result)
@@ -471,8 +546,9 @@ def main():
"command": {"type": 'str', "default": 'generate_csr',
"choices": ['generate_csr', 'export', 'import', 'reset']},
"certificate_type": {"type": 'str', "default": 'HTTPS',
- "choices": ['HTTPS', 'CA', 'CSC', 'CLIENT_TRUST_CERTIFICATE']},
+ "choices": ['HTTPS', 'CA', 'CUSTOMCERTIFICATE', 'CSC', 'CLIENT_TRUST_CERTIFICATE']},
"certificate_path": {"type": 'path'},
+ "ssl_key": {"type": 'path'},
"passphrase": {"type": 'str', "no_log": True},
"cert_params": {"type": 'dict', "options": {
"common_name": {"type": 'str', "required": True},
@@ -480,7 +556,7 @@ def main():
"locality_name": {"type": 'str', "required": True},
"state_name": {"type": 'str', "required": True},
"country_code": {"type": 'str', "required": True},
- "email_address": {"type": 'str', "required": True},
+ "email_address": {"type": 'str'},
"organization_name": {"type": 'str', "required": True},
"subject_alt_name": {"type": 'list', "elements": 'str', "default": []}
}},
@@ -500,21 +576,26 @@ def main():
try:
with iDRACRedfishAPI(module.params) as idrac:
- certype = certype_map.get(module.params.get('certificate_type'))
- op = module.params.get('command')
+ cert_type = certype_map.get(module.params.get('certificate_type'))
+ operation = module.params.get('command')
res_id = module.params.get('resource_id')
if not res_id:
- res_id = get_res_id(idrac, certype)
+ res_id = get_res_id(idrac, cert_type)
idrac_service_uri = get_idrac_service(idrac, res_id)
actions_map = get_actions_map(idrac, idrac_service_uri)
- certificate_action(module, idrac, actions_map, op, certype, res_id)
+ if operation in ["import", "reset"] and module.params.get('reset') and module.params.get('wait') <= 0:
+ module.exit_json(msg=WAIT_NEGATIVE_OR_ZERO_MSG, failed=True)
+ ssl_key = module.params.get('ssl_key')
+ if operation == "import" and ssl_key is not None and cert_type == "Server":
+ upload_ssl_key(module, idrac, actions_map, ssl_key, res_id)
+ certificate_action(module, idrac, actions_map, operation, cert_type, res_id)
except HTTPError as err:
- module.fail_json(msg=str(err), error_info=json.load(err))
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
except (ImportError, ValueError, RuntimeError, SSLValidationError,
ConnectionError, KeyError, TypeError, IndexError) as e:
- module.fail_json(msg=str(e))
+ module.exit_json(msg=str(e), failed=True)
if __name__ == '__main__':
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py
index e4d966345..8172e6838 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
-# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.4.0
+# Copyright (C) 2018-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -30,7 +30,7 @@ options:
share_name:
description: Network share path of update repository. CIFS, NFS, HTTP, HTTPS and FTP share types are supported.
type: str
- required: True
+ required: true
share_user:
description: Network share user in the format 'user@domain' or 'domain\\user' if user is
part of a domain else 'user'. This option is mandatory for CIFS Network Share.
@@ -47,44 +47,85 @@ options:
job_wait:
description: Whether to wait for job completion or not.
type: bool
- default: True
+ default: true
catalog_file_name:
description: Catalog file name relative to the I(share_name).
type: str
default: 'Catalog.xml'
ignore_cert_warning:
description: Specifies if certificate warnings are ignored when HTTPS share is used.
- If C(True) option is set, then the certificate warnings are ignored.
+ If C(true) option is set, then the certificate warnings are ignored.
type: bool
- default: True
+ default: true
apply_update:
description:
- - If I(apply_update) is set to C(True), then the packages are applied.
- - If I(apply_update) is set to C(False), no updates are applied, and a catalog report
+ - If I(apply_update) is set to C(true), then the packages are applied.
+ - If I(apply_update) is set to C(false), no updates are applied, and a catalog report
of packages is generated and returned.
type: bool
- default: True
+ default: true
reboot:
description:
- Provides the option to apply the update packages immediately or in the next reboot.
- - If I(reboot) is set to C(True), then the packages are applied immediately.
- - If I(reboot) is set to C(False), then the packages are staged and applied in the next reboot.
+ - If I(reboot) is set to C(true), then the packages are applied immediately.
+ - If I(reboot) is set to C(false), then the packages are staged and applied in the next reboot.
- Packages that do not require a reboot are applied immediately irrespective of I(reboot).
type: bool
- default: False
+ default: false
+ proxy_support:
+ description:
+ - Specifies if a proxy should be used.
+ - Proxy parameters are applicable on C(HTTP), C(HTTPS), and C(FTP) share type of repositories.
+ - C(ParametersProxy) sets the proxy parameters for the current firmware operation.
+ - C(DefaultProxy) uses the proxy values that are set by default in iDRAC.
+ - The default proxy can be set in the Lifecycle Controller attributes using M(dellemc.openmanage.idrac_attributes).
+ - C(Off) does not use a proxy.
+ - For iDRAC8-based servers, use a proxy server with basic authentication.
+ - "For iDRAC9-based servers, use digest authentication for the proxy server;
+ basic authentication is not supported."
+ choices: ["ParametersProxy", "DefaultProxy", "Off"]
+ type: str
+ default: "Off"
+ proxy_server:
+ description:
+ - The IP address of the proxy server.
+ - "This IP will not be validated. The download job will be created even for invalid I(proxy_server).
+ Please check the results of the job for error details."
+ - This is required when I(proxy_support) is C(ParametersProxy).
+ type: str
+ proxy_port:
+ description:
+ - The port of the proxy server.
+ - This is required when I(proxy_support) is C(ParametersProxy).
+ type: int
+ proxy_type:
+ description:
+ - The proxy type of the proxy server.
+ - This is required when I(proxy_support) is C(ParametersProxy).
+ - "Note: SOCKS4 proxy does not support IPv6 address."
+ choices: [HTTP, SOCKS]
+ type: str
+ proxy_uname:
+ description: The user name for the proxy server.
+ type: str
+ proxy_passwd:
+ description: The password for the proxy server.
+ type: str
requirements:
- - "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "omsdk >= 1.2.503"
+ - "python >= 3.9.6"
author:
- "Rajeev Arakkal (@rajeevarakkal)"
- "Felix Stephen (@felixs88)"
+ - "Jagadeesh N V (@jagadeeshnv)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
- Module will report success based on the iDRAC firmware update parent job status if there are no individual
component jobs present.
- For server with iDRAC firmware 5.00.00.00 and later, if the repository contains unsupported packages, then the
module will return success with a proper message.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip) and I(share_name).
- This module supports C(check_mode).
'''
@@ -97,9 +138,9 @@ EXAMPLES = """
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "192.168.0.0:/share"
- reboot: True
- job_wait: True
- apply_update: True
+ reboot: true
+ job_wait: true
+ apply_update: true
catalog_file_name: "Catalog.xml"
- name: Update firmware from repository on a CIFS Share
@@ -111,9 +152,9 @@ EXAMPLES = """
share_name: "full_cifs_path"
share_user: "share_user"
share_password: "share_password"
- reboot: True
- job_wait: True
- apply_update: True
+ reboot: true
+ job_wait: true
+ apply_update: true
catalog_file_name: "Catalog.xml"
- name: Update firmware from repository on a HTTP
@@ -123,9 +164,9 @@ EXAMPLES = """
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "http://downloads.dell.com"
- reboot: True
- job_wait: True
- apply_update: True
+ reboot: true
+ job_wait: true
+ apply_update: true
- name: Update firmware from repository on a HTTPS
dellemc.openmanage.idrac_firmware:
@@ -134,9 +175,26 @@ EXAMPLES = """
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "https://downloads.dell.com"
- reboot: True
- job_wait: True
- apply_update: True
+ reboot: true
+ job_wait: true
+ apply_update: true
+
+- name: Update firmware from repository on a HTTPS via proxy
+ dellemc.openmanage.idrac_firmware:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "https://downloads.dell.com"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ proxy_support: ParametersProxy
+ proxy_server: 192.168.1.10
+ proxy_type: HTTP
+ proxy_port: 80
+ proxy_uname: "proxy_user"
+ proxy_passwd: "proxy_pwd"
- name: Update firmware from repository on a FTP
dellemc.openmanage.idrac_firmware:
@@ -144,10 +202,10 @@ EXAMPLES = """
idrac_user: "user_name"
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
- share_name: "ftp://ftp.dell.com"
- reboot: True
- job_wait: True
- apply_update: True
+ share_name: "ftp://ftp.mydomain.com"
+ reboot: true
+ job_wait: true
+ apply_update: true
"""
RETURN = """
@@ -182,12 +240,11 @@ from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac i
from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlparse
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
try:
from omsdk.sdkcreds import UserCredentials
from omsdk.sdkfile import FileOnShare
- from omsdk.http.sdkwsmanbase import WsManProtocolBase
HAS_OMSDK = True
except ImportError:
HAS_OMSDK = False
@@ -195,6 +252,7 @@ except ImportError:
SHARE_TYPE = {'nfs': 'NFS', 'cifs': 'CIFS', 'ftp': 'FTP',
'http': 'HTTP', 'https': 'HTTPS', 'tftp': 'TFTP'}
CERT_WARN = {True: 'On', False: 'Off'}
+PROXY_SUPPORT = {"DefaultProxy": "Use_Default_Settings", "Off": "Off", "ParametersProxy": "Use_Custom_Settings"}
IDRAC_PATH = "/redfish/v1/Dell/Systems/System.Embedded.1/DellSoftwareInstallationService"
PATH = "/redfish/v1/Dell/Systems/System.Embedded.1/DellSoftwareInstallationService/Actions/" \
"DellSoftwareInstallationService.InstallFromRepository"
@@ -202,6 +260,9 @@ GET_REPO_BASED_UPDATE_LIST_PATH = "/redfish/v1/Dell/Systems/System.Embedded.1/De
"Actions/DellSoftwareInstallationService.GetRepoBasedUpdateList"
JOB_URI = "/redfish/v1/JobService/Jobs/{job_id}"
iDRAC_JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{job_id}"
+LOG_SERVICE_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/LogServices/Lclog"
+iDRAC9_LC_LOG = "/redfish/v1/Managers/iDRAC.Embedded.1/LogServices/Lclog/Entries"
+iDRAC8_LC_LOG = "/redfish/v1/Managers/iDRAC.Embedded.1/Logs/Lclog"
MESSAGE = "Firmware versions on server match catalog, applicable updates are not present in the repository."
EXIT_MESSAGE = "The catalog in the repository specified in the operation has the same firmware versions " \
"as currently present on the server."
@@ -345,16 +406,59 @@ def handle_HTTP_error(module, httperr):
module.fail_json(msg=err_message)
-def update_firmware_url_redfish(module, idrac, share_name, apply_update, reboot, job_wait, payload, repo_urls):
+def get_error_syslog(idrac, curr_time, uri):
+ error_log_found = False
+ msg = None
+ # 'SYS226' Unable to transfer a file, Catalog/Catalog.xml, because of the
+ # reason described by the code 404 sent by the HTTP remote host server.
+ # 'SYS252' Unable to transfer a file, Catalog/Catalog.xml, because the file is
+ # not available at the remote host location.
+ # 'SYS261' Unable to transfer the file, Catalog/catalog.xml, because initial network
+ # connection to the remote host server is not successfully started.
+ error_log_ids = ['SYS229', 'SYS227', 'RED132', 'JCP042', 'RED068', 'RED137']
+ intrvl = 5
+ retries = 60 // intrvl
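+    # Poll the filtered LC log every 5 seconds for roughly a minute, looking for any of the known transfer-failure message IDs.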
+ try:
+ if not curr_time:
+ resp = idrac.invoke_request(LOG_SERVICE_URI, "GET")
+ uri = resp.json_data.get('Entries').get('@odata.id')
+ curr_time = resp.json_data.get('DateTime')
+ fltr = "?$filter=Created%20ge%20'{0}'".format(curr_time)
+ fltr_uri = "{0}{1}".format(uri, fltr)
+ while retries:
+ resp = idrac.invoke_request(fltr_uri, "GET")
+ logs_list = resp.json_data.get("Members")
+ for log in logs_list:
+ for err_id in error_log_ids:
+ if err_id in log.get('MessageId'):
+ error_log_found = True
+ msg = log.get('Message')
+ break
+ if msg or error_log_found:
+ break
+ if msg or error_log_found:
+ break
+ retries = retries - 1
+ time.sleep(intrvl)
+ else:
+ msg = "No Error log found."
+ error_log_found = False
+ except Exception:
+ msg = "No Error log found."
+ error_log_found = False
+ return error_log_found, msg
+
+
+def update_firmware_url_redfish(module, idrac, share_path, apply_update, reboot, job_wait, payload, repo_urls):
"""Update firmware through HTTP/HTTPS/FTP and return the job details."""
- repo_url = urlparse(share_name)
+ repo_url = urlparse(share_path)
job_details, status = {}, {}
ipaddr = repo_url.netloc
share_type = repo_url.scheme
sharename = repo_url.path.strip('/')
- payload['IPAddress'] = ipaddr
if repo_url.path:
payload['ShareName'] = sharename
+ payload['IPAddress'] = ipaddr
payload['ShareType'] = SHARE_TYPE[share_type]
install_url = PATH
get_repo_url = GET_REPO_BASED_UPDATE_LIST_PATH
@@ -363,8 +467,18 @@ def update_firmware_url_redfish(module, idrac, share_name, apply_update, reboot,
install_url = actions.get("#DellSoftwareInstallationService.InstallFromRepository", {}).get("target", PATH)
get_repo_url = actions.get("#DellSoftwareInstallationService.GetRepoBasedUpdateList", {}).\
get("target", GET_REPO_BASED_UPDATE_LIST_PATH)
+ try:
+ log_resp = idrac.invoke_request(LOG_SERVICE_URI, "GET")
+ log_uri = log_resp.json_data.get('Entries').get('@odata.id')
+ curr_time = log_resp.json_data.get('DateTime')
+ except Exception:
+ log_uri = iDRAC9_LC_LOG
+ curr_time = None
resp = idrac.invoke_request(install_url, method="POST", data=payload)
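+    # Scan the LC log for download/transfer errors raised by the install request and fail early with the logged message if one is found.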
+ error_log_found, msg = get_error_syslog(idrac, curr_time, log_uri)
job_id = get_jobid(module, resp)
+ if error_log_found:
+ module.exit_json(msg=msg, failed=True, job_id=job_id)
resp, msg = wait_for_job_completion(module, JOB_URI.format(job_id=job_id), job_wait, reboot, apply_update)
if not msg:
status = resp.json_data
@@ -388,17 +502,25 @@ def update_firmware_url_omsdk(module, idrac, share_name, catalog_file_name, appl
ipaddr = repo_url.netloc
share_type = repo_url.scheme
sharename = repo_url.path.strip('/')
+ proxy_support = PROXY_SUPPORT[module.params["proxy_support"]]
+ proxy_type = module.params.get("proxy_type") if module.params.get("proxy_type") is not None else "HTTP"
+ proxy_server = module.params.get("proxy_server") if module.params.get("proxy_server") is not None else ""
+ proxy_port = module.params.get("proxy_port") if module.params.get("proxy_port") is not None else 80
+ proxy_uname = module.params.get("proxy_uname")
+ proxy_passwd = module.params.get("proxy_passwd")
if ipaddr == "downloads.dell.com":
- status = idrac.update_mgr.update_from_dell_repo_url(ipaddress=ipaddr, share_type=share_type,
- share_name=sharename, catalog_file=catalog_file_name,
- apply_update=apply_update, reboot_needed=reboot,
- ignore_cert_warning=ignore_cert_warning, job_wait=job_wait)
+ status = idrac.update_mgr.update_from_dell_repo_url(
+ ipaddress=ipaddr, share_type=share_type, share_name=sharename, catalog_file=catalog_file_name,
+ apply_update=apply_update, reboot_needed=reboot, ignore_cert_warning=ignore_cert_warning, job_wait=job_wait,
+ proxy_support=proxy_support, proxy_type=proxy_type, proxy_server=proxy_server, proxy_port=proxy_port,
+ proxy_uname=proxy_uname, proxy_passwd=proxy_passwd)
get_check_mode_status(status, module)
else:
- status = idrac.update_mgr.update_from_repo_url(ipaddress=ipaddr, share_type=share_type,
- share_name=sharename, catalog_file=catalog_file_name,
- apply_update=apply_update, reboot_needed=reboot,
- ignore_cert_warning=ignore_cert_warning, job_wait=job_wait)
+ status = idrac.update_mgr.update_from_repo_url(
+ ipaddress=ipaddr, share_type=share_type, share_name=sharename, catalog_file=catalog_file_name,
+ apply_update=apply_update, reboot_needed=reboot, ignore_cert_warning=ignore_cert_warning, job_wait=job_wait,
+ proxy_support=proxy_support, proxy_type=proxy_type, proxy_server=proxy_server,
+ proxy_port=proxy_port, proxy_uname=proxy_uname, proxy_passwd=proxy_passwd)
get_check_mode_status(status, module)
return status, job_details
@@ -434,8 +556,8 @@ def update_firmware_omsdk(idrac, module):
upd_share = FileOnShare(remote="{0}{1}{2}".format(share_name, os.sep, catalog_file_name),
mount_point=module.params['share_mnt'], isFolder=False,
creds=UserCredentials(share_user, share_pwd))
- msg['update_status'] = idrac.update_mgr.update_from_repo(upd_share, apply_update=apply_update,
- reboot_needed=reboot, job_wait=job_wait)
+ msg['update_status'] = idrac.update_mgr.update_from_repo(
+            upd_share, apply_update=apply_update, reboot_needed=reboot, job_wait=job_wait)
get_check_mode_status(msg['update_status'], module)
json_data, repo_status, failed = msg['update_status']['job_details'], False, False
@@ -512,6 +634,20 @@ def update_firmware_redfish(idrac, module, repo_urls):
payload['Password'] = share_pwd
if share_name.lower().startswith(('http://', 'https://', 'ftp://')):
+ proxy = module.params.get("proxy_support")
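+        # Map the module's proxy_* options onto the install payload; explicit proxy server settings are sent only for ParametersProxy.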
+ if proxy == "ParametersProxy":
+ proxy_dict = {"proxy_server": "ProxyServer",
+ "proxy_port": "ProxyPort",
+ "proxy_support": "ProxySupport",
+ "proxy_type": "ProxyType",
+ "proxy_uname": "ProxyUname",
+ "proxy_passwd": "ProxyPasswd"}
+ for pk, pv in proxy_dict.items():
+ prm = module.params.get(pk)
+ if prm is not None:
+ payload[pv] = prm
+ elif proxy == "DefaultProxy":
+ payload["ProxySupport"] = module.params.get("proxy_support")
msg['update_status'], job_details = update_firmware_url_redfish(
module, idrac, share_name, apply_update, reboot, job_wait, payload, repo_urls)
if job_details:
@@ -523,8 +659,8 @@ def update_firmware_redfish(idrac, module, repo_urls):
payload['ShareName'] = '\\'.join(cifs[3:])
payload['ShareType'] = 'CIFS'
else:
- nfs = urlparse(share_name)
- payload['IPAddress'] = nfs.scheme
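+            # Prepend a scheme so urlparse() places the host of 'host:/path' in netloc; previously the host ended up in scheme.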
+ nfs = urlparse("nfs://" + share_name)
+ payload['IPAddress'] = nfs.netloc.strip(':')
payload['ShareName'] = nfs.path.strip('/')
payload['ShareType'] = 'NFS'
resp = idrac.invoke_request(PATH, method="POST", data=payload)
@@ -596,19 +732,30 @@ def update_firmware_redfish(idrac, module, repo_urls):
def main():
specs = {
"share_name": {"required": True, "type": 'str'},
- "share_user": {"required": False, "type": 'str'},
- "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
- "share_mnt": {"required": False, "type": 'str'},
-
- "catalog_file_name": {"required": False, "type": 'str', "default": "Catalog.xml"},
- "reboot": {"required": False, "type": 'bool', "default": False},
- "job_wait": {"required": False, "type": 'bool', "default": True},
- "ignore_cert_warning": {"required": False, "type": 'bool', "default": True},
- "apply_update": {"required": False, "type": 'bool', "default": True},
+ "share_user": {"type": 'str'},
+ "share_password": {"type": 'str', "aliases": ['share_pwd'], "no_log": True},
+ "share_mnt": {"type": 'str'},
+
+ "catalog_file_name": {"type": 'str', "default": "Catalog.xml"},
+ "reboot": {"type": 'bool', "default": False},
+ "job_wait": {"type": 'bool', "default": True},
+ "ignore_cert_warning": {"type": 'bool', "default": True},
+ "apply_update": {"type": 'bool', "default": True},
+ # proxy params
+ "proxy_support": {"default": 'Off', "type": 'str', "choices": ["Off", "ParametersProxy", "DefaultProxy"]},
+ "proxy_type": {"type": 'str', "choices": ["HTTP", "SOCKS"]},
+ "proxy_server": {"type": 'str'},
+ "proxy_port": {"type": 'int'},
+ "proxy_uname": {"type": 'str'},
+ "proxy_passwd": {"type": 'str', "no_log": True},
}
specs.update(idrac_auth_params)
module = AnsibleModule(
argument_spec=specs,
+ required_if=[
+ # ['proxy_type', 'SOCKS', ('proxy_port',)],
+ ['proxy_support', 'ParametersProxy', ('proxy_server', 'proxy_type', 'proxy_port',)],
+ ],
supports_check_mode=True)
redfish_check = False
@@ -637,7 +784,10 @@ def main():
status = update_firmware_omsdk(idrac, module)
except HTTPError as err:
module.fail_json(msg=str(err), update_status=json.load(err))
- except (RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError,
+ except URLError as err:
+        message = err.reason if err.reason else str(err)
+ module.exit_json(msg=message, unreachable=True)
+ except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
ImportError, ValueError, TypeError, SSLError) as e:
module.fail_json(msg=str(e))
except Exception as exc:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py
index 3f644f85e..b4e4a37e4 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -24,10 +24,11 @@ extends_documentation_fragment:
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author: "Rajeev Arakkal (@rajeevarakkal)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -35,10 +36,10 @@ EXAMPLES = """
---
- name: Get Installed Firmware Inventory
dellemc.openmanage.idrac_firmware_info:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
"""
RETURN = r'''
@@ -109,13 +110,6 @@ from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac i
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
-try:
- from omsdk.sdkfile import LocalFile
- from omsdk.catalog.sdkupdatemgr import UpdateManager
- from omdrivers.helpers.iDRAC.UpdateHelper import UpdateHelper
- HAS_OMSDK = True
-except ImportError:
- HAS_OMSDK = False
# Main
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_license.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_license.py
new file mode 100644
index 000000000..565c61cd4
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_license.py
@@ -0,0 +1,1118 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.7.0
+# Copyright (C) 2024 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: idrac_license
+short_description: Configure iDRAC licenses
+version_added: "8.7.0"
+description:
+ - This module allows you to import, export, and delete licenses on iDRAC.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ license_id:
+ description:
+ - Entitlement ID of the license that is to be imported, exported or deleted.
+ - I(license_id) is required when I(delete) is C(true) or I(export) is C(true).
+ type: str
+ aliases: ['entitlement_id']
+ delete:
+ description:
+ - Delete the license from the iDRAC.
+ - When I(delete) is C(true), then I(license_id) is required.
+ - I(delete) is mutually exclusive with I(export) and I(import).
+ type: bool
+ default: false
+ export:
+ description:
+ - Export the license from the iDRAC.
+ - When I(export) is C(true), I(license_id) and I(share_parameters) are required.
+ - I(export) is mutually exclusive with I(delete) and I(import).
+ type: bool
+ default: false
+ import:
+ description:
+ - Import the license from the iDRAC.
+ - When I(import) is C(true), I(share_parameters) is required.
+ - I(import) is mutually exclusive with I(delete) and I(export).
+ type: bool
+ default: false
+ share_parameters:
+ description:
+ - Parameters that are required for the import and export operation of a license.
+ - I(share_parameters) is required when I(export) or I(import) is C(true).
+ type: dict
+ suboptions:
+ share_type:
+ description:
+ - Share type of the network share.
+ - C(local) uses local path for I(import) and I(export) operation.
+ - C(nfs) uses NFS share for I(import) and I(export) operation.
+ - C(cifs) uses CIFS share for I(import) and I(export) operation.
+ - C(http) uses HTTP share for I(import) and I(export) operation.
+ - C(https) uses HTTPS share for I(import) and I(export) operation.
+ type: str
+ choices: [local, nfs, cifs, http, https]
+ default: local
+ file_name:
+ description:
+ - License file name for I(import) and I(export) operation.
+ - I(file_name) is required when I(import) is C(true).
+ - For the I(import) operation, when I(share_type) is C(local), the supported extensions for I(file_name) are '.txt' and '.xml'.
+ For other share types, the supported extension is '.xml'.
+ type: str
+ ip_address:
+ description:
+ - IP address of the network share.
+ - I(ip_address) is required when I(share_type) is C(nfs), C(cifs), C(http) or C(https).
+ type: str
+ share_name:
+ description:
+ - Network share or local path of the license file.
+ type: str
+ workgroup:
+ description:
+ - Workgroup of the network share.
+ - I(workgroup) is applicable only when I(share_type) is C(cifs).
+ type: str
+ username:
+ description:
+ - Username of the network share.
+ - I(username) is required when I(share_type) is C(cifs).
+ type: str
+ password:
+ description:
+ - Password of the network share.
+ - I(password) is required when I(share_type) is C(cifs).
+ type: str
+ ignore_certificate_warning:
+ description:
+ - Ignores the certificate warning while connecting to Share and is only applicable when I(share_type) is C(https).
+ - C(off) ignores the certificate warning.
+ - C(on) does not ignore the certificate warning.
+ type: str
+ choices: ["off", "on"]
+ default: "off"
+ proxy_support:
+ description:
+ - Specifies if proxy is to be used or not.
+ - C(off) does not use proxy settings.
+ - C(default_proxy) uses the default proxy settings.
+ - C(parameters_proxy) uses the specified proxy settings. I(proxy_server) is required when I(proxy_support) is C(parameters_proxy).
+ - I(proxy_support) is only applicable when I(share_type) is C(http) or C(https).
+ type: str
+ choices: ["off", "default_proxy", "parameters_proxy"]
+ default: "off"
+ proxy_type:
+ description:
+ - The proxy type of the proxy server.
+ - C(http) to select HTTP proxy.
+ - C(socks) to select SOCKS proxy.
+ - I(proxy_type) is only applicable when I(share_type) is C(http) or C(https) and when I(proxy_support) is C(parameters_proxy).
+ type: str
+ choices: [http, socks]
+ default: http
+ proxy_server:
+ description:
+ - The IP address of the proxy server.
+ - I(proxy_server) is required when I(proxy_support) is C(parameters_proxy).
+ - I(proxy_server) is only applicable when I(share_type) is C(http) or C(https) and when I(proxy_support) is C(parameters_proxy).
+ type: str
+ proxy_port:
+ description:
+ - The port of the proxy server.
+ - I(proxy_port) is only applicable when I(share_type) is C(http) or C(https) and when I(proxy_support) is C(parameters_proxy).
+ type: int
+ default: 80
+ proxy_username:
+ description:
+ - The username of the proxy server.
+ - I(proxy_username) is only applicable when I(share_type) is C(http) or C(https) and when I(proxy_support) is C(parameters_proxy).
+ type: str
+ proxy_password:
+ description:
+ - The password of the proxy server.
+ - I(proxy_password) is only applicable when I(share_type) is C(http) or C(https) and when I(proxy_support) is C(parameters_proxy).
+ type: str
+ resource_id:
+ type: str
+ description:
+ - Id of the resource.
+ - If the value for resource ID is not provided, the module picks the first resource ID available from the list of system resources returned by the iDRAC.
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Rajshekar P(@rajshekarp87)"
+notes:
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports only iDRAC9 and above.
+ - This module supports IPv4 and IPv6 addresses.
+ - This module does not support C(check_mode).
+ - When I(share_type) is C(local) for I(import) and I(export) operations, job_details are not displayed.
+"""
+
+EXAMPLES = r"""
+---
+- name: Export a license from iDRAC to local
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "local"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+
+- name: Export a license from iDRAC to NFS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "nfs"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ ip_address: "192.168.0.1"
+
+- name: Export a license from iDRAC to CIFS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "cifs"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ ip_address: "192.168.0.1"
+ username: "username"
+ password: "password"
+ workgroup: "workgroup"
+
+- name: Export a license from iDRAC to HTTP share via proxy
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "http"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ ip_address: "192.168.0.1"
+ username: "username"
+ password: "password"
+ proxy_support: "parameters_proxy"
+ proxy_type: socks
+ proxy_server: "192.168.0.2"
+ proxy_port: 1080
+ proxy_username: "proxy_username"
+ proxy_password: "proxy_password"
+
+- name: Export a license from iDRAC to HTTPS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "https"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ ip_address: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ignore_certificate_warning: "on"
+
+- name: Import a license to iDRAC from local
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: local
+ share_name: "/path/to/share"
+
+- name: Import a license to iDRAC from NFS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: nfs
+ ip_address: "192.168.0.1"
+ share_name: "/path/to/share"
+
+- name: Import a license to iDRAC from CIFS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: cifs
+ ip_address: "192.168.0.1"
+ share_name: "/path/to/share"
+ username: "username"
+ password: "password"
+
+- name: Import a license to iDRAC from HTTP share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: http
+ ip_address: "192.168.0.1"
+ share_name: "/path/to/share"
+ username: "username"
+ password: "password"
+
+- name: Import a license to iDRAC from HTTPS share via proxy
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: https
+ ip_address: "192.168.0.1"
+ share_name: "/path/to/share"
+ username: "username"
+ password: "password"
+ proxy_support: "parameters_proxy"
+ proxy_server: "192.168.0.2"
+ proxy_port: 808
+ proxy_username: "proxy_username"
+ proxy_password: "proxy_password"
+
+- name: Delete a License from iDRAC
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENCE_123"
+ delete: true
+"""
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Status of the license operation.
+ returned: always
+ sample: "Successfully exported the license."
+job_details:
+ description: Returns the output for status of the job.
+ returned: For import and export operations
+ type: dict
+ sample: {
+ "ActualRunningStartTime": "2024-01-09T05:16:19",
+ "ActualRunningStopTime": "2024-01-09T05:16:19",
+ "CompletionTime": "2024-01-09T05:16:19",
+ "Description": "Job Instance",
+ "EndTime": null,
+ "Id": "JID_XXXXXXXXX",
+ "JobState": "Completed",
+ "JobType": "LicenseExport",
+ "Message": "The command was successful.",
+ "MessageArgs": [],
+ "MessageId": "LIC900",
+ "Name": "Export: License",
+ "PercentComplete": 100,
+ "StartTime": "2024-01-09T05:16:19",
+ "TargetSettingsURI": null
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.8.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "Base.1.8.AccessDenied",
+ "Message": "The authentication credentials included with this request are missing or invalid.",
+ "MessageArgs": [],
+ "RelatedProperties": [],
+ "Severity": "Critical",
+ "Resolution": "Attempt to ensure that the URI is correct and that the service has the appropriate credentials."
+ }
+ ]
+ }
+ }
+'''
+
+
+import json
+import os
+import base64
+from urllib.error import HTTPError, URLError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.compat.version import LooseVersion
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import (
+ get_idrac_firmware_version, get_dynamic_uri, get_manager_res_id,
+ validate_and_get_first_resource_id_uri, remove_key, idrac_redfish_job_tracking)
+
+REDFISH = "/redfish/v1"
+MANAGERS_URI = "/redfish/v1/Managers"
+IDRAC_JOB_URI = "{res_uri}/Jobs/{job_id}"
+
+OEM = "Oem"
+MANUFACTURER = "Dell"
+LICENSE_MANAGEMENT_SERVICE = "DellLicenseManagementService"
+ACTIONS = "Actions"
+EXPORT_LOCAL = "#DellLicenseManagementService.ExportLicense"
+EXPORT_NETWORK_SHARE = "#DellLicenseManagementService.ExportLicenseToNetworkShare"
+IMPORT_LOCAL = "#DellLicenseManagementService.ImportLicense"
+IMPORT_NETWORK_SHARE = "#DellLicenseManagementService.ImportLicenseFromNetworkShare"
+ODATA = "@odata.id"
+ODATA_REGEX = "(.*?)@odata"
+
+INVALID_LICENSE_MSG = "License with ID '{license_id}' does not exist on the iDRAC."
+SUCCESS_EXPORT_MSG = "Successfully exported the license."
+SUCCESS_DELETE_MSG = "Successfully deleted the license."
+SUCCESS_IMPORT_MSG = "Successfully imported the license."
+FAILURE_MSG = "Unable to '{operation}' the license with id '{license_id}' as it does not exist."
+FAILURE_IMPORT_MSG = "Unable to import the license."
+NO_FILE_MSG = "License file not found."
+UNSUPPORTED_FIRMWARE_MSG = "iDRAC firmware version is not supported."
+NO_OPERATION_SKIP_MSG = "Task is skipped as none of import, export or delete is specified."
+INVALID_FILE_MSG = "File extension is invalid. Supported extensions for local 'share_type' " \
+ "are: .txt and .xml, and for network 'share_type' is: .xml."
+INVALID_DIRECTORY_MSG = "Provided directory path '{path}' is not valid."
+INSUFFICIENT_DIRECTORY_PERMISSION_MSG = "Provided directory path '{path}' is not writable. " \
+ "Please check if the directory has appropriate permissions"
+MISSING_FILE_NAME_PARAMETER_MSG = "Missing required parameter 'file_name'."
+
+PROXY_SUPPORT = {"off": "Off", "default_proxy": "DefaultProxy", "parameters_proxy": "ParametersProxy"}
+
+
+class License():
+ def __init__(self, idrac, module):
+ """
+ Initializes the class instance with the provided idrac and module parameters.
+
+ :param idrac: The idrac parameter.
+ :type idrac: Any
+ :param module: The module parameter.
+ :type module: Any
+ """
+ self.idrac = idrac
+ self.module = module
+
+ def execute(self):
+ """
+        Executes the requested license operation; each operation class (delete, export, import) overrides this method.
+
+ :return: None
+ """
+
+ def check_license_id(self, license_id):
+ """
+        Check that a license with the given ID exists on the iDRAC.
+
+        :param license_id: The ID of the license to check.
+ :return: The response from the license URL.
+ """
+ license_uri = self.get_license_url()
+ license_url = license_uri + f"/{license_id}"
+ try:
+ response = self.idrac.invoke_request(license_url, 'GET')
+ return response
+ except Exception:
+ self.module.exit_json(msg=INVALID_LICENSE_MSG.format(license_id=license_id), skipped=True)
+
+ def get_license_url(self):
+ """
+ Retrieves the license URL for the current user.
+
+ :return: The license URL as a string.
+ """
+ v1_resp = get_dynamic_uri(self.idrac, REDFISH)
+ license_service_url = v1_resp.get('LicenseService', {}).get(ODATA, {})
+ license_service_resp = get_dynamic_uri(self.idrac, license_service_url)
+ license_url = license_service_resp.get('Licenses', {}).get(ODATA, {})
+ return license_url
+
+ def get_job_status(self, license_job_response):
+ """
+ Get the status of a job.
+
+        Args:
+            license_job_response (object): The response object for the license import or export request.
+
+ Returns:
+ dict: The job details.
+ """
+ res_uri = validate_and_get_first_resource_id_uri(self.module, self.idrac, MANAGERS_URI)
+ job_tracking_uri = license_job_response.headers.get("Location")
+ job_id = job_tracking_uri.split("/")[-1]
+ job_uri = IDRAC_JOB_URI.format(job_id=job_id, res_uri=res_uri[0])
+ job_failed, msg, job_dict, wait_time = idrac_redfish_job_tracking(self.idrac, job_uri)
+ job_dict = remove_key(job_dict, regex_pattern=ODATA_REGEX)
+ if job_failed:
+ self.module.exit_json(
+ msg=job_dict.get('Message'),
+ failed=True,
+ job_details=job_dict)
+ return job_dict
+
+ def get_share_details(self):
+ """
+ Retrieves the share details from the given module.
+
+ Args:
+ module (object): The module object containing the share parameters.
+
+ Returns:
+ dict: A dictionary containing the share details with the following keys:
+ - IPAddress (str): The IP address of the share.
+ - ShareName (str): The name of the share.
+ - UserName (str): The username for accessing the share.
+ - Password (str): The password for accessing the share.
+ """
+ share_details = {}
+ share_details["IPAddress"] = self.module.params.get('share_parameters').get('ip_address')
+ share_details["ShareName"] = self.module.params.get('share_parameters').get('share_name')
+ share_details["UserName"] = self.module.params.get('share_parameters').get('username')
+ share_details["Password"] = self.module.params.get('share_parameters').get('password')
+ return share_details
+
+ def get_proxy_details(self):
+ """
+ Retrieves the proxy details based on the provided module parameters.
+
+ Args:
+ self: The instance of the class.
+ module: The module object containing the parameters.
+
+ Returns:
+ dict: A dictionary containing the proxy details.
+ """
+ proxy_details = {}
+ proxy_details["ShareType"] = self.module.params.get('share_parameters').get('share_type').upper()
+ share_details = self.get_share_details()
+ proxy_details.update(share_details)
+ proxy_details["IgnoreCertWarning"] = self.module.params.get('share_parameters').get('ignore_certificate_warning').capitalize()
+ if self.module.params.get('share_parameters').get('proxy_support') == "parameters_proxy":
+ proxy_details["ProxySupport"] = PROXY_SUPPORT[self.module.params.get('share_parameters').get('proxy_support')]
+ proxy_details["ProxyType"] = self.module.params.get('share_parameters').get('proxy_type').upper()
+ proxy_details["ProxyServer"] = self.module.params.get('share_parameters').get('proxy_server')
+ proxy_details["ProxyPort"] = str(self.module.params.get('share_parameters').get('proxy_port'))
+ if self.module.params.get('share_parameters').get('proxy_username') and self.module.params.get('share_parameters').get('proxy_password'):
+ proxy_details["ProxyUname"] = self.module.params.get('share_parameters').get('proxy_username')
+ proxy_details["ProxyPasswd"] = self.module.params.get('share_parameters').get('proxy_password')
+ return proxy_details
+
+
+class DeleteLicense(License):
+ def execute(self):
+ """
+ Executes the delete operation for a given license ID.
+
+ Args:
+ module (object): The Ansible module object.
+
+ Returns:
+ object: The response object from the delete operation.
+ """
+ license_id = self.module.params.get('license_id')
+ self.check_license_id(license_id)
+ license_url = self.get_license_url()
+ delete_license_url = license_url + f"/{license_id}"
+ delete_license_response = self.idrac.invoke_request(delete_license_url, 'DELETE')
+ status = delete_license_response.status_code
+ if status == 204:
+ self.module.exit_json(msg=SUCCESS_DELETE_MSG, changed=True)
+ else:
+ self.module.exit_json(msg=FAILURE_MSG.format(operation="delete", license_id=license_id), failed=True)
+
+
+class ExportLicense(License):
+ STATUS_SUCCESS = [200, 202]
+
+ def execute(self):
+ """
+ Executes the export operation for a given license ID.
+
+ :param module: The Ansible module object.
+ :type module: AnsibleModule
+
+ :return: The response from the export operation.
+ :rtype: Response
+ """
+ share_type = self.module.params.get('share_parameters').get('share_type')
+ license_id = self.module.params.get('license_id')
+ self.check_license_id(license_id)
+ export_license_url = self.__get_export_license_url()
+ job_status = {}
+ if share_type == "local":
+ export_license_response = self.__export_license_local(export_license_url)
+ elif share_type in ["http", "https"]:
+ export_license_response = self.__export_license_http(export_license_url)
+ job_status = self.get_job_status(export_license_response)
+ elif share_type == "cifs":
+ export_license_response = self.__export_license_cifs(export_license_url)
+ job_status = self.get_job_status(export_license_response)
+ elif share_type == "nfs":
+ export_license_response = self.__export_license_nfs(export_license_url)
+ job_status = self.get_job_status(export_license_response)
+ status = export_license_response.status_code
+ if status in self.STATUS_SUCCESS:
+ self.module.exit_json(msg=SUCCESS_EXPORT_MSG, changed=True, job_details=job_status)
+ else:
+ self.module.exit_json(msg=FAILURE_MSG.format(operation="export", license_id=license_id), failed=True, job_details=job_status)
+
+ def __export_license_local(self, export_license_url):
+ """
+ Export the license to a local directory.
+
+ Args:
+ module (object): The Ansible module object.
+ export_license_url (str): The URL for exporting the license.
+
+ Returns:
+ object: The license status after exporting.
+ """
+ payload = {}
+ payload["EntitlementID"] = self.module.params.get('license_id')
+ path = self.module.params.get('share_parameters').get('share_name')
+ if not (os.path.exists(path) or os.path.isdir(path)):
+ self.module.exit_json(msg=INVALID_DIRECTORY_MSG.format(path=path), failed=True)
+ if not os.access(path, os.W_OK):
+ self.module.exit_json(msg=INSUFFICIENT_DIRECTORY_PERMISSION_MSG.format(path=path), failed=True)
+ license_name = self.module.params.get('share_parameters').get('file_name')
+ if license_name:
+ license_file_name = f"{license_name}_iDRAC_license.txt"
+ else:
+ license_file_name = f"{self.module.params['license_id']}_iDRAC_license.txt"
+ license_status = self.idrac.invoke_request(export_license_url, "POST", data=payload)
+ license_data = license_status.json_data
+ license_file = license_data.get("LicenseFile")
+ file_name = os.path.join(path, license_file_name)
+ with open(file_name, "w") as fp:
+ fp.writelines(license_file)
+ return license_status
+
+ def __export_license_http(self, export_license_url):
+ """
+ Export the license using the HTTP protocol.
+
+ Args:
+ module (object): The module object.
+ export_license_url (str): The URL for exporting the license.
+
+ Returns:
+ str: The export status.
+ """
+ payload = {}
+ payload["EntitlementID"] = self.module.params.get('license_id')
+ proxy_details = self.get_proxy_details()
+ payload.update(proxy_details)
+ export_status = self.__export_license(payload, export_license_url)
+ return export_status
+
+ def __export_license_cifs(self, export_license_url):
+ """
+ Export the license using CIFS share type.
+
+ Args:
+ module (object): The Ansible module object.
+ export_license_url (str): The URL for exporting the license.
+
+ Returns:
+ str: The export status.
+ """
+ payload = {}
+ payload["EntitlementID"] = self.module.params.get('license_id')
+ payload["ShareType"] = "CIFS"
+ if self.module.params.get('share_parameters').get('workgroup'):
+ payload["Workgroup"] = self.module.params.get('share_parameters').get('workgroup')
+ share_details = self.get_share_details()
+ payload.update(share_details)
+ export_status = self.__export_license(payload, export_license_url)
+ return export_status
+
+ def __export_license_nfs(self, export_license_url):
+ """
+ Export the license using NFS share type.
+
+ Args:
+ module (object): The Ansible module object.
+ export_license_url (str): The URL for exporting the license.
+
+ Returns:
+ dict: The export status of the license.
+ """
+ payload = {}
+ payload["EntitlementID"] = self.module.params.get('license_id')
+ payload["ShareType"] = "NFS"
+ payload["IPAddress"] = self.module.params.get('share_parameters').get('ip_address')
+ payload["ShareName"] = self.module.params.get('share_parameters').get('share_name')
+ export_status = self.__export_license(payload, export_license_url)
+ return export_status
+
+ def __get_export_license_url(self):
+ """
+ Get the export license URL.
+
+ :param module: The module object.
+ :type module: object
+ :return: The export license URL.
+ :rtype: str
+ """
+ uri, error_msg = validate_and_get_first_resource_id_uri(
+ self.module, self.idrac, MANAGERS_URI)
+ if error_msg:
+ self.module.exit_json(msg=error_msg, failed=True)
+ resp = get_dynamic_uri(self.idrac, uri)
+ url = resp.get('Links', {}).get(OEM, {}).get(MANUFACTURER, {}).get(LICENSE_MANAGEMENT_SERVICE, {}).get(ODATA, {})
+ action_resp = get_dynamic_uri(self.idrac, url)
+ license_service = EXPORT_LOCAL if self.module.params.get('share_parameters').get('share_type') == "local" else EXPORT_NETWORK_SHARE
+ export_url = action_resp.get(ACTIONS, {}).get(license_service, {}).get('target', {})
+ return export_url
+
+ def __export_license(self, payload, export_license_url):
+ """
+ Export the license to a file.
+
+ Args:
+ module (object): The Ansible module object.
+ payload (dict): The payload containing the license information.
+ export_license_url (str): The URL for exporting the license.
+
+ Returns:
+ dict: The license status after exporting.
+ """
+ license_name = self.module.params.get('share_parameters').get('file_name')
+ if license_name:
+ license_file_name = f"{license_name}_iDRAC_license.xml"
+ else:
+ license_file_name = f"{self.module.params['license_id']}_iDRAC_license.xml"
+ payload["FileName"] = license_file_name
+ license_status = self.idrac.invoke_request(export_license_url, "POST", data=payload)
+ return license_status
+
+
+class ImportLicense(License):
+ STATUS_SUCCESS = [200, 202]
+
+ def execute(self):
+ """
+ Executes the import license process based on the given module parameters.
+
+ Args:
+ module (object): The Ansible module object.
+
+ Returns:
+ object: The response object from the import license API call.
+ """
+ if not self.module.params.get('share_parameters').get('file_name'):
+ self.module.exit_json(msg=MISSING_FILE_NAME_PARAMETER_MSG, failed=True)
+ share_type = self.module.params.get('share_parameters').get('share_type')
+ self.__check_file_extension()
+ import_license_url = self.__get_import_license_url()
+ resource_id = get_manager_res_id(self.idrac)
+ job_status = {}
+ if share_type == "local":
+ import_license_response = self.__import_license_local(import_license_url, resource_id)
+ elif share_type in ["http", "https"]:
+ import_license_response = self.__import_license_http(import_license_url, resource_id)
+ job_status = self.get_job_status(import_license_response)
+ elif share_type == "cifs":
+ import_license_response = self.__import_license_cifs(import_license_url, resource_id)
+ job_status = self.get_job_status(import_license_response)
+ elif share_type == "nfs":
+ import_license_response = self.__import_license_nfs(import_license_url, resource_id)
+ job_status = self.get_job_status(import_license_response)
+ status = import_license_response.status_code
+ if status in self.STATUS_SUCCESS:
+ self.module.exit_json(msg=SUCCESS_IMPORT_MSG, changed=True, job_details=job_status)
+ else:
+ self.module.exit_json(msg=FAILURE_IMPORT_MSG, failed=True, job_details=job_status)
+
+ def __import_license_local(self, import_license_url, resource_id):
+ """
+ Import a license locally.
+
+ Args:
+ module (object): The Ansible module object.
+ import_license_url (str): The URL for importing the license.
+ resource_id (str): The ID of the resource.
+
+ Returns:
+ dict: The import status of the license.
+ """
+ payload = {}
+ path = self.module.params.get('share_parameters').get('share_name')
+ if not (os.path.exists(path) or os.path.isdir(path)):
+ self.module.exit_json(msg=INVALID_DIRECTORY_MSG.format(path=path), failed=True)
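+        # The local license file is read as raw bytes and base64-encoded, since the ImportLicense action expects the content in the 'LicenseFile' field.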
+ file_path = self.module.params.get('share_parameters').get('share_name') + "/" + self.module.params.get('share_parameters').get('file_name')
+        file_exists = os.path.exists(file_path)
+        if file_exists:
+ with open(file_path, "rb") as cert:
+ cert_content = cert.read()
+ read_file = base64.encodebytes(cert_content).decode('ascii')
+ else:
+ self.module.exit_json(msg=NO_FILE_MSG, failed=True)
+ payload["LicenseFile"] = read_file
+ payload["FQDD"] = resource_id
+ payload["ImportOptions"] = "Force"
+ try:
+ import_status = self.idrac.invoke_request(import_license_url, "POST", data=payload)
+ except HTTPError as err:
+ filter_err = remove_key(json.load(err), regex_pattern=ODATA_REGEX)
+ message_details = filter_err.get('error').get('@Message.ExtendedInfo')[0]
+ message_id = message_details.get('MessageId')
+ if 'LIC018' in message_id:
+ self.module.exit_json(msg=message_details.get('Message'), skipped=True)
+ else:
+ self.module.exit_json(msg=message_details.get('Message'), error_info=filter_err, failed=True)
+ return import_status
+
+ def __import_license_http(self, import_license_url, resource_id):
+ """
+ Imports a license using HTTP.
+
+ Args:
+ import_license_url (str): The URL for importing the license.
+ resource_id (str): The ID of the resource.
+
+ Returns:
+ object: The import status.
+ """
+ payload = {}
+ payload["LicenseName"] = self.module.params.get('share_parameters').get('file_name')
+ payload["FQDD"] = resource_id
+ payload["ImportOptions"] = "Force"
+ proxy_details = self.get_proxy_details()
+ payload.update(proxy_details)
+ import_status = self.idrac.invoke_request(import_license_url, "POST", data=payload)
+ return import_status
+
+ def __import_license_cifs(self, import_license_url, resource_id):
+ """
+ Imports a license using CIFS share type.
+
+ Args:
+ import_license_url (str): The URL for importing the license.
+ resource_id (str): The ID of the resource.
+
+ Returns:
+ object: The import status of the license.
+ """
+ payload = {}
+ payload["ShareType"] = "CIFS"
+ payload["LicenseName"] = self.module.params.get('share_parameters').get('file_name')
+ payload["FQDD"] = resource_id
+ payload["ImportOptions"] = "Force"
+ if self.module.params.get('share_parameters').get('workgroup'):
+ payload["Workgroup"] = self.module.params.get('share_parameters').get('workgroup')
+ share_details = self.get_share_details()
+ payload.update(share_details)
+ import_status = self.idrac.invoke_request(import_license_url, "POST", data=payload)
+ return import_status
+
+ def __import_license_nfs(self, import_license_url, resource_id):
+ """
+ Import a license from an NFS share.
+
+ Args:
+ import_license_url (str): The URL for importing the license.
+ resource_id (str): The ID of the resource.
+
+ Returns:
+ dict: The import status of the license.
+ """
+ payload = {}
+ payload["ShareType"] = "NFS"
+ payload["IPAddress"] = self.module.params.get('share_parameters').get('ip_address')
+ payload["ShareName"] = self.module.params.get('share_parameters').get('share_name')
+ payload["LicenseName"] = self.module.params.get('share_parameters').get('file_name')
+ payload["FQDD"] = resource_id
+ payload["ImportOptions"] = "Force"
+ import_status = self.idrac.invoke_request(import_license_url, "POST", data=payload)
+ return import_status
+
+ def __check_file_extension(self):
+ """
+ Check that the given file name has a valid extension for the selected share type.
+
+ :return: None
+ """
+ share_type = self.module.params.get('share_parameters').get('share_type')
+ file_name = self.module.params.get('share_parameters').get('file_name')
+ valid_extensions = {".txt", ".xml"} if share_type == "local" else {".xml"}
+ file_extension = any(file_name.lower().endswith(ext) for ext in valid_extensions)
+ if not file_extension:
+ self.module.exit_json(msg=INVALID_FILE_MSG, failed=True)
+
+ def __get_import_license_url(self):
+ """
+ Get the import license URL.
+
+ :return: The import license URL.
+ :rtype: str
+ """
+ uri, error_msg = validate_and_get_first_resource_id_uri(
+ self.module, self.idrac, MANAGERS_URI)
+ if error_msg:
+ self.module.exit_json(msg=error_msg, failed=True)
+ resp = get_dynamic_uri(self.idrac, uri)
+ url = resp.get('Links', {}).get(OEM, {}).get(MANUFACTURER, {}).get(LICENSE_MANAGEMENT_SERVICE, {}).get(ODATA, {})
+ action_resp = get_dynamic_uri(self.idrac, url)
+ license_service = IMPORT_LOCAL if self.module.params.get('share_parameters').get('share_type') == "local" else IMPORT_NETWORK_SHARE
+ import_url = action_resp.get(ACTIONS, {}).get(license_service, {}).get('target', {})
+ return import_url
+
+ def get_job_status(self, license_job_response):
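+ """
+ Track the license job referenced by the Location header of the response and return the job details
+ with the OData keys removed. Exits the module when the job fails, reporting message ID LIC018 as skipped.
+ """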
+ res_uri = validate_and_get_first_resource_id_uri(self.module, self.idrac, MANAGERS_URI)
+ job_tracking_uri = license_job_response.headers.get("Location")
+ job_id = job_tracking_uri.split("/")[-1]
+ job_uri = IDRAC_JOB_URI.format(job_id=job_id, res_uri=res_uri[0])
+ job_failed, msg, job_dict, wait_time = idrac_redfish_job_tracking(self.idrac, job_uri)
+ job_dict = remove_key(job_dict, regex_pattern=ODATA_REGEX)
+ if job_failed:
+ if job_dict.get('MessageId') == 'LIC018':
+ self.module.exit_json(msg=job_dict.get('Message'), skipped=True, job_details=job_dict)
+ else:
+ self.module.exit_json(
+ msg=job_dict.get('Message'),
+ failed=True,
+ job_details=job_dict)
+ return job_dict
+
+
+class LicenseType:
+ _license_classes = {
+ "import": ImportLicense,
+ "export": ExportLicense,
+ "delete": DeleteLicense,
+ }
+
+ @staticmethod
+ def license_operation(idrac, module):
+ """
+ Perform a license operation based on the given parameters.
+
+ :param idrac: The IDRAC object.
+ :type idrac: IDRAC
+ :param module: The Ansible module object.
+ :type module: AnsibleModule
+ :return: The license class object based on the license type.
+ :rtype: LicenseType
+ """
+ license_type = next((param for param in ["import", "export", "delete"] if module.params[param]), None)
+ if not license_type:
+ module.exit_json(msg=NO_OPERATION_SKIP_MSG, skipped=True)
+ license_class = LicenseType._license_classes.get(license_type)
+ return license_class(idrac, module)
+
+
+def main():
+ """
+ Entry point for the module.
+
+ Builds the argument specification with `get_argument_spec`, merges in `idrac_auth_params`, and creates the
+ `AnsibleModule` object with the mutually exclusive and conditionally required options; check mode is not supported.
+ The function then connects to the iDRAC through `iDRACRedfishAPI`, exits with a failure when the iDRAC firmware
+ version is 3.0 or lower, and otherwise resolves the requested operation through `LicenseType.license_operation`
+ and calls `execute` on the returned object.
+
+ An `HTTPError` is reported with the filtered error information and `failed` set to `True`, a `URLError` marks
+ the host as unreachable, and `SSLValidationError`, `ConnectionError`, `TypeError`, `ValueError`, and `OSError`
+ fail the module with the error message.
+
+ Returns:
+ None
+ """
+ specs = get_argument_spec()
+ specs.update(idrac_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ mutually_exclusive=[("import", "export", "delete")],
+ required_if=[
+ ["import", True, ("share_parameters",)],
+ ["export", True, ("license_id", "share_parameters",)],
+ ["delete", True, ("license_id",)]
+ ],
+ supports_check_mode=False
+ )
+
+ try:
+ with iDRACRedfishAPI(module.params) as idrac:
+ idrac_firmware_version = get_idrac_firmware_version(idrac)
+ if LooseVersion(idrac_firmware_version) <= '3.0':
+ module.exit_json(msg=UNSUPPORTED_FIRMWARE_MSG, failed=True)
+ license_obj = LicenseType.license_operation(idrac, module)
+ if license_obj:
+ license_obj.execute()
+ except HTTPError as err:
+ filter_err = remove_key(json.load(err), regex_pattern=ODATA_REGEX)
+ module.exit_json(msg=str(err), error_info=filter_err, failed=True)
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError) as err:
+ module.exit_json(msg=str(err), failed=True)
+
+
+def get_argument_spec():
+ """
+ Returns the argument spec dictionary for the module.
+ The spec defines the following parameters:
+ - "license_id": A string representing the license ID. Alias: "entitlement_id".
+ - "delete": A boolean indicating whether to delete the license.
+ - "export": A boolean indicating whether to export the license.
+ - "import": A boolean indicating whether to import the license.
+ - "share_parameters": A dictionary of share options with the suboptions "share_type", "file_name",
+ "ip_address", "share_name", "workgroup", "username", "password", "ignore_certificate_warning",
+ "proxy_support", "proxy_type", "proxy_server", "proxy_port" (an integer), "proxy_username", and
+ "proxy_password", together with its own "required_if" and "required_together" rules.
+ - "resource_id": A string representing the resource ID.
+ """
+ return {
+ "license_id": {"type": 'str', "aliases": ['entitlement_id']},
+ "delete": {"type": 'bool', "default": False},
+ "export": {"type": 'bool', "default": False},
+ "import": {"type": 'bool', "default": False},
+ "share_parameters": {
+ "type": 'dict',
+ "options": {
+ "share_type": {
+ "type": 'str',
+ "default": 'local',
+ "choices": ['local', 'nfs', 'cifs', 'http', 'https']
+ },
+ "file_name": {"type": 'str'},
+ "ip_address": {"type": 'str'},
+ "share_name": {"type": 'str'},
+ "workgroup": {"type": 'str'},
+ "username": {"type": 'str'},
+ "password": {"type": 'str', "no_log": True},
+ "ignore_certificate_warning": {
+ "type": 'str',
+ "default": "off",
+ "choices": ["off", "on"]
+ },
+ "proxy_support": {
+ "type": 'str',
+ "default": "off",
+ "choices": ["off", "default_proxy", "parameters_proxy"]
+ },
+ "proxy_type": {
+ "type": 'str',
+ "default": 'http',
+ "choices": ['http', 'socks']
+ },
+ "proxy_server": {"type": 'str'},
+ "proxy_port": {"type": 'int', "default": 80},
+ "proxy_username": {"type": 'str'},
+ "proxy_password": {"type": 'str', "no_log": True}
+ },
+ "required_if": [
+ ["share_type", "local", ["share_name"]],
+ ["share_type", "nfs", ["ip_address", "share_name"]],
+ ["share_type", "cifs", ["ip_address", "share_name", "username", "password"]],
+ ["share_type", "http", ["ip_address", "share_name"]],
+ ["share_type", "https", ["ip_address", "share_name"]],
+ ["proxy_support", "parameters_proxy", ["proxy_server"]]
+ ],
+ "required_together": [
+ ("username", "password"),
+ ("proxy_username", "proxy_password")
+ ]
+ },
+ "resource_id": {"type": 'str'}
+ }
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py
index 2d555f9a2..c9376f4e2 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -23,17 +23,18 @@ extends_documentation_fragment:
- dellemc.openmanage.idrac_auth_options
options:
job_id:
- required: True
+ required: true
type: str
description: JOB ID in the format "JID_123456789012".
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Rajeev Arakkal (@rajeevarakkal)"
- "Anooja Vardhineni (@anooja-vardhineni)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -41,11 +42,11 @@ EXAMPLES = """
---
- name: Show status of a Lifecycle Control job
dellemc.openmanage.idrac_lifecycle_controller_job_status_info:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
- job_id: "JID_1234567890"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: "JID_1234567890"
"""
RETURN = r'''
@@ -113,7 +114,7 @@ def main():
try:
with iDRACConnection(module.params) as idrac:
- job_id, msg, failed = module.params.get('job_id'), {}, False
+ job_id, msg = module.params.get('job_id'), {}
msg = idrac.job_mgr.get_job_status(job_id)
if msg.get('Status') == "Found Fault":
module.fail_json(msg="Job ID is invalid.")
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py
index 984f8e3f4..60d1aaacb 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -31,12 +31,13 @@ options:
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Anooja Vardhineni (@anooja-vardhineni)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module does not support C(check_mode).
"""
EXAMPLES = """
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py
index 74606260c..4a9f30f68 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -28,7 +28,7 @@ options:
- Network share or local path.
- CIFS, NFS network share types are supported.
type: str
- required: True
+ required: true
share_user:
type: str
description: Network share user in the format 'user@domain' or 'domain\\user' if user is
@@ -40,18 +40,19 @@ options:
job_wait:
description: Whether to wait for the running job completion or not.
type: bool
- default: True
+ default: true
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Rajeev Arakkal (@rajeevarakkal)"
- "Anooja Vardhineni (@anooja-vardhineni)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- Exporting data to a local share is supported only on iDRAC9-based PowerEdge Servers and later.
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module does not support C(check_mode).
"""
@@ -133,11 +134,13 @@ error_info:
"""
+import socket
+import json
+import copy
from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
-import json
try:
from omsdk.sdkfile import file_share_manager
from omsdk.sdkcreds import UserCredentials
@@ -181,6 +184,10 @@ def run_export_lc_logs(idrac, module):
creds=UserCredentials(module.params['share_user'],
module.params['share_password']),
isFolder=True)
+ data = socket.getaddrinfo(module.params["idrac_ip"], module.params["idrac_port"])
+ if "AF_INET6" == data[0][0]._name_:
+ ip = copy.deepcopy(module.params["idrac_ip"])
+ lclog_file_name_format = "{ip}_%Y%m%d_%H%M%S_LC_Log.log".format(ip=ip.replace(":", ".").replace("..", "."))
lc_log_file = myshare.new_file(lclog_file_name_format)
job_wait = module.params['job_wait']
msg = idrac.log_mgr.lclog_export(lc_log_file, job_wait)
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py
index 3d3bddc03..94f605b46 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -19,18 +19,19 @@ module: idrac_lifecycle_controller_status_info
short_description: Get the status of the Lifecycle Controller
version_added: "2.1.0"
description:
- - This module shows the status of the Lifecycle Controller on a Dell EMC PowerEdge server.
+ - This module shows the status of the Lifecycle Controller on a Dell PowerEdge server.
extends_documentation_fragment:
- dellemc.openmanage.idrac_auth_options
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Rajeev Arakkal (@rajeevarakkal)"
- "Anooja Vardhineni (@anooja-vardhineni)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -52,7 +53,7 @@ msg:
type: str
sample: "Successfully fetched the lifecycle controller status."
lc_status_info:
- description: Displays the status of the Lifecycle Controller on a Dell EMC PowerEdge server.
+ description: Displays the status of the Lifecycle Controller on a Dell PowerEdge server.
returned: success
type: dict
sample: {
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py
index 8f2930165..b03d0dc2c 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -136,13 +136,14 @@ options:
description: Enter the static IP subnet mask to iDRAC.
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Anooja Vardhineni (@anooja-vardhineni)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -150,9 +151,9 @@ EXAMPLES = """
---
- name: Configure iDRAC network settings
dellemc.openmanage.idrac_network:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
register_idrac_on_dns: Enabled
dns_idrac_name: None
@@ -246,10 +247,8 @@ try:
Selection_NICTypes, Failover_NICTypes,
AutoDetect_NICTypes, Autoneg_NICTypes,
Speed_NICTypes, Duplex_NICTypes, DHCPEnable_IPv4Types,
- DNSFromDHCP_IPv4Types, Enable_IPv4Types,
- DNSFromDHCP_IPv4StaticTypes)
+ Enable_IPv4Types, DNSFromDHCP_IPv4StaticTypes)
from omsdk.sdkfile import file_share_manager
- from omsdk.sdkcreds import UserCredentials
except ImportError:
pass
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network_attributes.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network_attributes.py
new file mode 100644
index 000000000..0103570be
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network_attributes.py
@@ -0,0 +1,748 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.4.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_network_attributes
+short_description: Configures the iDRAC network attributes
+version_added: "8.4.0"
+description:
+ - This module allows you to configure the port and partition network attributes on the network interface cards.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ network_adapter_id:
+ type: str
+ required: true
+ description:
+ - FQDD of the network adapter device that represents the physical network adapter capable of connecting to a computer network.
+ - An example of FQDD of the network adapter is 'NIC.Mezzanine.1A'.
+ network_device_function_id:
+ type: str
+ required: true
+ description:
+ - FQDD of the network adapter device function that represents a logical interface exposed by the network adapter.
+ - An example of FQDD of the network adapter device function is 'NIC.Mezzanine.1A-1-1'.
+ network_attributes:
+ type: dict
+ description:
+ - "Dictionary of network attributes and value. To view the list of attributes and its structure, see the below API
+ U(https://I(idrac_ip)/redfish/v1/Systems/System.Embedded.1/NetworkAdapters/<network_id>/NetworkDeviceFunctions/
+ <network_port_id>/Settings) and U(https://<idrac_ip>/redfish/v1/Schemas/NetworkDeviceFunction.v1_8_0.json)."
+ - I(network_attributes) is mutually exclusive with I(oem_network_attributes).
+ oem_network_attributes:
+ type: dict
+ description:
+ - "The attributes must be part of the Integrated Dell Remote Access Controller Attribute Registry.
+ To view the list of attributes in the Attribute Registry for iDRAC9 and newer versions, see
+ U(https://I(idrac_ip)/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/<network_id>/NetworkDeviceFunctions/
+ <network_port_id>/Oem/Dell/DellNetworkAttributes/<network_port_id>)
+ and U(https://I(idrac_ip)/redfish/v1/Registries/NetworkAttributesRegistry_<network_port_id>/
+ NetworkAttributesRegistry_network_port_id.json)."
+ - For iDRAC8 based servers, derive the network attribute name from Server Configuration Profile.
+ - I(oem_network_attributes) is mutually exclusive with I(network_attributes).
+ resource_id:
+ type: str
+ description:
+ - Id of the resource.
+ - If the value for resource ID is not provided, the module picks the first resource ID available from the list of system resources returned by the iDRAC.
+ clear_pending:
+ type: bool
+ default: false
+ description:
+ - This parameter allows you to clear all the pending OEM network attributes changes.
+ - C(false) does not perform any operation.
+ - C(true) discards any pending changes to network attributes, or if a job is in scheduled state, removes the job.
+ - The I(apply_time) value is ignored and has no impact on the I(clear_pending) operation.
+ - This operation is not supported for iDRAC8.
+ apply_time:
+ type: str
+ required: true
+ description:
+ - Apply time of the I(network_attributes) and I(oem_network_attributes).
+ - This is applicable only to I(network_attributes) and I(oem_network_attributes).
+ - C(Immediate) allows the user to immediately reboot the host and apply the changes. This is applicable
+ for I(oem_network_attributes). I(job_wait) is applicable.
+ - C(OnReset) allows the user to apply the changes on the next reboot of the host server.
+ - C(AtMaintenanceWindowStart) allows the user to apply at the start of a maintenance window as specified
+ in I(maintenance_window). A reboot job is scheduled.
+ - C(InMaintenanceWindowOnReset) allows to apply after a manual reset but within the maintenance window as
+ specified in I(maintenance_window).
+ - This is not applicable for iDRAC8; the value is ignored and has no impact when configuring I(oem_network_attributes).
+ choices: [Immediate, OnReset, AtMaintenanceWindowStart, InMaintenanceWindowOnReset]
+ maintenance_window:
+ type: dict
+ description:
+ - This option allows you to schedule the maintenance window.
+ - This is required when I(apply_time) is C(AtMaintenanceWindowStart) or C(InMaintenanceWindowOnReset).
+ suboptions:
+ start_time:
+ type: str
+ required: true
+ description:
+ - The start time for the maintenance window to be scheduled.
+ - "The format is YYYY-MM-DDThh:mm:ss<offset>"
+ - "<offset> is the time offset from UTC that the current timezone set in
+ iDRAC in the format: +05:30 for IST."
+ duration:
+ type: int
+ required: true
+ description:
+ - The duration in seconds for the maintenance window.
+ job_wait:
+ type: bool
+ default: true
+ description:
+ - Provides the option to wait for job completion.
+ - This is applicable when I(apply_time) is C(Immediate) for I(oem_network_attributes).
+ job_wait_timeout:
+ type: int
+ default: 1200
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(true).
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Abhishek Sinha(@ABHISHEK-SINHA10)"
+notes:
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Configure OEM network attributes
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_id: "NIC.Integrated.1"
+ network_port_id: "NIC.Integrated.1-1-1"
+ apply_time: "Immediate"
+ oem_network_attributes:
+ BannerMessageTimeout: "4"
+
+- name: Configure OEM network attributes to apply on reset
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ oem_network_attributes:
+ BannerMessageTimeout: "4"
+ apply_time: OnReset
+
+- name: Configure OEM network attributes to apply at maintenance window
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ oem_network_attributes:
+ BannerMessageTimeout: "4"
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+
+- name: Clearing the pending attributes
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ apply_time: "Immediate"
+ clear_pending: true
+
+- name: Clearing the OEM pending attributes and apply the OEM network attributes
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ apply_time: "Immediate"
+ clear_pending: true
+ oem_network_attributes:
+ BannerMessageTimeout: "4"
+
+- name: Configure OEM network attributes and wait for the job
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ apply_time: "Immediate"
+ oem_network_attributes:
+ LnkSpeed: "10MbpsHalf"
+ WakeOnLan: "Enabled"
+ VLanMode: "Enabled"
+ job_wait: true
+ job_wait_timeout: 2000
+
+- name: Configure redfish network attributes to update VLAN on reset
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ apply_time: OnReset
+ network_attributes:
+ Ethernet:
+ VLAN:
+ VLANEnable: true
+
+- name: Configure redfish network attributes to apply on reset
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ network_attributes:
+ Ethernet:
+ VLAN:
+ VLANEnable: true
+ apply_time: OnReset
+
+- name: Configure redfish network attributes of iSCSI to apply at maintenance window start
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ network_attributes:
+ iSCSIBoot:
+ InitiatorIPAddress: 1.0.0.1
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+
+- name: Configure redfish network attributes to apply at maintenance window on reset
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ network_attributes:
+ Ethernet:
+ VLAN:
+ VLANEnable: false
+ VLANId: 1
+ apply_time: InMaintenanceWindowOnReset
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Status of the attribute update operation.
+ returned: when network attributes are applied
+ type: str
+ sample: "Successfully updated the network attributes."
+invalid_attributes:
+ description: Dictionary of invalid attributes provided that cannot be applied.
+ returned: On invalid attributes or values
+ type: dict
+ sample: {
+ "IscsiInitiatorIpAddr": "Attribute is not valid.",
+ "IscsiInitiatorSubnet": "Attribute is not valid."
+ }
+job_status:
+ description: Returns the output for status of the job.
+ returned: always
+ type: dict
+ sample: {
+ "ActualRunningStartTime": null,
+ "ActualRunningStopTime": null,
+ "CompletionTime": null,
+ "Description": "Job Instance",
+ "EndTime": "TIME_NA",
+ "Id": "JID_XXXXXXXXX",
+ "JobState": "Scheduled",
+ "JobType": "NICConfiguration",
+ "Message": "Task successfully scheduled.",
+ "MessageArgs": [],
+ "MessageId": "JCP001",
+ "Name": "Configure: NIC.Integrated.1-1-1",
+ "PercentComplete": 0,
+ "StartTime": "2023-08-07T06:21:24",
+ "TargetSettingsURI": null
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+import time
+from urllib.error import HTTPError, URLError
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.compat.version import LooseVersion
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import (
+ idrac_auth_params, iDRACRedfishAPI)
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import (
+ delete_job, get_current_time, get_dynamic_uri, get_idrac_firmware_version,
+ get_scheduled_job_resp, remove_key, validate_and_get_first_resource_id_uri,
+ idrac_redfish_job_tracking, xml_data_conversion)
+
+REGISTRY_URI = '/redfish/v1/Registries'
+SYSTEMS_URI = "/redfish/v1/Systems"
+iDRAC_JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{job_id}"
+
+SUCCESS_MSG = "Successfully updated the network attributes."
+SUCCESS_CLEAR_PENDING_ATTR_MSG = "Successfully cleared the pending network attributes."
+SCHEDULE_MSG = "Successfully scheduled the job for network attributes update."
+TIMEOUT_NEGATIVE_OR_ZERO_MSG = "The value for the `job_wait_timeout` parameter cannot be negative or zero."
+MAINTENACE_OFFSET_DIFF_MSG = "The maintenance time must be post-fixed with local offset to {0}."
+MAINTENACE_OFFSET_BEHIND_MSG = "The specified maintenance time window occurs in the past, provide a future time to schedule the maintenance window."
+APPLY_TIME_NOT_SUPPORTED_MSG = "Apply time {0} is not supported."
+INVALID_ATTR_MSG = "Unable to update the network attributes because invalid values are entered. " + \
+ "Enter the valid values for the network attributes and retry the operation."
+VALID_AND_INVALID_ATTR_MSG = "Successfully updated the network attributes for valid values. " + \
+ "Unable to update other attributes because invalid values are entered. Enter the valid values and retry the operation."
+NO_CHANGES_FOUND_MSG = "No changes found to be applied."
+CHANGES_FOUND_MSG = "Changes found to be applied."
+INVALID_ID_MSG = "Unable to complete the operation because " + \
+ "the value `{0}` for the input `{1}` parameter is invalid."
+JOB_RUNNING_CLEAR_PENDING_ATTR = "{0} Config job is running. Wait for the job to complete. Currently can not clear pending attributes."
+ATTRIBUTE_NOT_EXIST_CHECK_IDEMPOTENCY_MODE = 'Attribute is not valid.'
+CLEAR_PENDING_NOT_SUPPORTED_WITHOUT_ATTR_IDRAC8 = "Clear pending is not supported."
+WAIT_TIMEOUT_MSG = "The job is not complete after {0} seconds."
+
+
+class IDRACNetworkAttributes:
+
+ def __init__(self, idrac, module):
+ self.module = module
+ self.idrac = idrac
+ self.redfish_uri = None
+ self.oem_uri = None
+
+ def __perform_validation_for_network_adapter_id(self):
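+ """
+ Resolve the URI of the network adapter that matches I(network_adapter_id) under the first system
+ resource. Exits the module when the adapter is not found.
+ """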
+ odata = '@odata.id'
+ network_adapter_id = self.module.params.get('network_adapter_id')
+ network_adapter_id_uri, found_adapter = '', False
+ uri, error_msg = validate_and_get_first_resource_id_uri(
+ self.module, self.idrac, SYSTEMS_URI)
+ if error_msg:
+ self.module.exit_json(msg=error_msg, failed=True)
+ network_adapters = get_dynamic_uri(
+ self.idrac, uri, 'NetworkInterfaces')[odata]
+ network_adapter_list = get_dynamic_uri(
+ self.idrac, network_adapters, 'Members')
+ for each_adapter in network_adapter_list:
+ if network_adapter_id in each_adapter.get(odata):
+ found_adapter = True
+ network_adapter_id_uri = each_adapter.get(odata)
+ break
+ if not found_adapter:
+ self.module.exit_json(failed=True, msg=INVALID_ID_MSG.format(network_adapter_id,
+ 'network_adapter_id'))
+ return network_adapter_id_uri
+
+ def __perform_validation_for_network_device_function_id(self):
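+ """
+ Resolve the URI of the network device function that matches I(network_device_function_id) under the
+ validated network adapter. Exits the module when the device function is not found.
+ """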
+ odata = '@odata.id'
+ network_device_function_id_uri, found_device = '', False
+ network_device_function_id = self.module.params.get(
+ 'network_device_function_id')
+ network_adapter_id_uri = self.__perform_validation_for_network_adapter_id()
+ network_devices = get_dynamic_uri(
+ self.idrac, network_adapter_id_uri, 'NetworkDeviceFunctions')[odata]
+ network_device_list = get_dynamic_uri(
+ self.idrac, network_devices, 'Members')
+ for each_device in network_device_list:
+ if network_device_function_id in each_device.get(odata):
+ found_device = True
+ network_device_function_id_uri = each_device.get(odata)
+ break
+ if not found_device:
+ self.module.exit_json(failed=True, msg=INVALID_ID_MSG.format(network_device_function_id,
+ 'network_device_function_id'))
+ return network_device_function_id_uri
+
+ def __get_registry_fw_less_than_6_more_than_3(self):
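+ """
+ Build a mapping of OEM attribute names to their current values from the attribute registry entry of the
+ device function. Used for iDRAC firmware versions between 3.0 and 6.0.
+ """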
+ reg = {}
+ network_device_function_id = self.module.params.get(
+ 'network_device_function_id')
+ registry = get_dynamic_uri(self.idrac, REGISTRY_URI, 'Members')
+ for each_member in registry:
+ if network_device_function_id in each_member.get('@odata.id'):
+ location = get_dynamic_uri(
+ self.idrac, each_member.get('@odata.id'), 'Location')
+ if location:
+ uri = location[0].get('Uri')
+ attr = get_dynamic_uri(
+ self.idrac, uri, 'RegistryEntries').get('Attributes', {})
+ for each_attr in attr:
+ reg.update(
+ {each_attr['AttributeName']: each_attr['CurrentValue']})
+ break
+ return reg
+
+ def __validate_time(self, mtime):
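+ """Validate that the maintenance window start time ends with the iDRAC time offset and is not in the past."""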
+ curr_time, date_offset = get_current_time(self.idrac)
+ if not mtime.endswith(date_offset):
+ self.module.exit_json(
+ failed=True, msg=MAINTENACE_OFFSET_DIFF_MSG.format(date_offset))
+ if mtime < curr_time:
+ self.module.exit_json(
+ failed=True, msg=MAINTENACE_OFFSET_BEHIND_MSG)
+
+ def __get_redfish_apply_time(self, aplytm, rf_settings):
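+ """
+ Build the apply time settings for the request payload, including the maintenance window details when a
+ maintenance apply time is selected. Exits the module when the requested apply time is not supported.
+ """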
+ rf_set = {}
+ if rf_settings:
+ if aplytm not in rf_settings:
+ self.module.exit_json(
+ failed=True, msg=APPLY_TIME_NOT_SUPPORTED_MSG.format(aplytm))
+ elif 'Maintenance' in aplytm:
+ rf_set['ApplyTime'] = aplytm
+ m_win = self.module.params.get('maintenance_window')
+ self.__validate_time(m_win.get('start_time'))
+ rf_set['MaintenanceWindowStartTime'] = m_win.get('start_time')
+ rf_set['MaintenanceWindowDurationInSeconds'] = m_win.get(
+ 'duration')
+ else:
+ rf_set['ApplyTime'] = aplytm
+ return rf_set
+
+ def __get_registry_fw_less_than_3(self):
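+ """
+ Build a mapping of OEM attribute names to their current values from an exported Server Configuration
+ Profile of the NIC component. Used for iDRAC firmware versions 3.0 and earlier (iDRAC8).
+ """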
+ reg = {}
+ network_device_function_id = self.module.params.get(
+ 'network_device_function_id')
+ scp_response = self.idrac.export_scp(export_format="JSON", export_use="Default",
+ target="NIC", job_wait=True)
+ comp = scp_response.json_data.get("SystemConfiguration", {}).get("Components", {})
+ for each in comp:
+ if each.get('FQDD') == network_device_function_id:
+ for each_attr in each.get('Attributes'):
+ reg.update({each_attr['Name']: each_attr['Value']})
+ return reg
+
+ def get_current_server_registry(self):
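+ """
+ Collect the current attribute values from the iDRAC: OEM attributes are read from the DellNetworkAttributes
+ resource, the attribute registry, or an exported SCP depending on the firmware version, while redfish
+ attributes are read from the Ethernet, FibreChannel, and iSCSIBoot sections of the device function.
+ """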
+ reg = {}
+ oem_network_attributes = self.module.params.get(
+ 'oem_network_attributes')
+ network_attributes = self.module.params.get('network_attributes')
+ firm_ver = get_idrac_firmware_version(self.idrac)
+ if oem_network_attributes:
+ if LooseVersion(firm_ver) >= '6.0':
+ reg = get_dynamic_uri(self.idrac, self.oem_uri, 'Attributes')
+ elif '3.0' < LooseVersion(firm_ver) < '6.0':
+ reg = self.__get_registry_fw_less_than_6_more_than_3()
+ else:
+ reg = self.__get_registry_fw_less_than_3()
+ if network_attributes: # For Redfish
+ resp = get_dynamic_uri(self.idrac, self.redfish_uri)
+ reg.update({'Ethernet': resp.get('Ethernet', {})})
+ reg.update({'FibreChannel': resp.get('FibreChannel', {})})
+ reg.update({'iSCSIBoot': resp.get('iSCSIBoot', {})})
+ return reg
+
+ def extract_error_msg(self, resp):
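+ """Map each attribute reported in the @Message.ExtendedInfo entries of the response to its error message."""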
+ error_info = {}
+ if resp.body:
+ error = resp.json_data.get('error')
+ for each_dict_err in error.get("@Message.ExtendedInfo"):
+ key = each_dict_err.get('MessageArgs')[0]
+ msg = each_dict_err.get('Message')
+ if key not in error_info:
+ error_info.update({key: msg})
+ return error_info
+
+ def get_diff_between_current_and_module_input(self, module_attr, server_attr):
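+ """
+ Recursively compare the requested attributes against the current server values and return the number of
+ differing attributes along with a dictionary of attributes that are not present on the server.
+ """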
+ diff, invalid = 0, {}
+ if module_attr is None:
+ module_attr = {}
+ for each_attr in module_attr:
+ if each_attr in server_attr:
+ data_type = type(server_attr[each_attr])
+ if not isinstance(module_attr[each_attr], data_type):
+ diff += 1
+ elif isinstance(module_attr[each_attr], dict) and isinstance(server_attr[each_attr], dict):
+ tmp_diff, tmp_invalid = self.get_diff_between_current_and_module_input(
+ module_attr[each_attr], server_attr[each_attr])
+ diff += tmp_diff
+ invalid.update(tmp_invalid)
+ elif module_attr[each_attr] != server_attr[each_attr]:
+ diff += 1
+ elif each_attr not in server_attr:
+ invalid.update(
+ {each_attr: ATTRIBUTE_NOT_EXIST_CHECK_IDEMPOTENCY_MODE})
+ return diff, invalid
+
+ def validate_job_timeout(self):
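+ """Fail the module when I(job_wait) is enabled and I(job_wait_timeout) is zero or negative."""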
+ if self.module.params.get("job_wait") and self.module.params.get("job_wait_timeout") <= 0:
+ self.module.exit_json(
+ msg=TIMEOUT_NEGATIVE_OR_ZERO_MSG, failed=True)
+
+ def apply_time(self, setting_uri):
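+ """Read the supported apply times of the settings resource and build the apply time settings for the request."""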
+ resp = get_dynamic_uri(self.idrac, setting_uri, "@Redfish.Settings")
+ rf_settings = resp.get("SupportedApplyTimes", [])
+ apply_time = self.module.params.get('apply_time', {})
+ rf_set = self.__get_redfish_apply_time(apply_time, rf_settings)
+ return rf_set
+
+ def set_dynamic_base_uri_and_validate_ids(self):
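+ """Validate the adapter and device function IDs and store the device function URI and its OEM DellNetworkAttributes URI."""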
+ network_device_function_id_uri = self.__perform_validation_for_network_device_function_id()
+ resp = get_dynamic_uri(self.idrac, network_device_function_id_uri)
+ self.oem_uri = resp.get('Links', {}).get('Oem', {}).get(
+ 'Dell', {}).get('DellNetworkAttributes', {}).get('@odata.id', {})
+ self.redfish_uri = network_device_function_id_uri
+
+
+class OEMNetworkAttributes(IDRACNetworkAttributes):
+ def __init__(self, idrac, module):
+ super().__init__(idrac, module)
+
+ def clear_pending(self):
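+ """
+ Clear the pending OEM network attribute changes: fails when a NICConfiguration job is running, deletes a
+ scheduled job when present, and invokes the DellManager.ClearPending action for any remaining pending
+ attributes. Honors check mode and exits early when only I(clear_pending) was requested; not supported on
+ iDRAC8 without I(oem_network_attributes).
+ """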
+ firm_ver = get_idrac_firmware_version(self.idrac)
+ oem_network_attributes = self.module.params.get(
+ 'oem_network_attributes')
+ if LooseVersion(firm_ver) < '3.0':
+ if oem_network_attributes:
+ return None
+ self.module.exit_json(
+ msg=CLEAR_PENDING_NOT_SUPPORTED_WITHOUT_ATTR_IDRAC8)
+ resp = get_dynamic_uri(self.idrac, self.oem_uri, '@Redfish.Settings')
+ settings_uri = resp.get('SettingsObject').get('@odata.id')
+ settings_uri_resp = get_dynamic_uri(self.idrac, settings_uri)
+ pending_attributes = settings_uri_resp.get('Attributes')
+ clear_pending_uri = settings_uri_resp.get('Actions').get(
+ '#DellManager.ClearPending').get('target')
+ if not pending_attributes and not oem_network_attributes:
+ self.module.exit_json(msg=NO_CHANGES_FOUND_MSG)
+ job_resp = get_scheduled_job_resp(self.idrac, 'NICConfiguration')
+ job_id, job_state = job_resp.get('Id'), job_resp.get('JobState')
+ if job_id:
+ if job_state in ["Running"]:
+ job_resp = remove_key(job_resp, regex_pattern='(.*?)@odata')
+ self.module.exit_json(failed=True, msg=JOB_RUNNING_CLEAR_PENDING_ATTR.format('NICConfiguration'),
+ job_status=job_resp)
+ elif job_state in ["Starting", "Scheduled", "Scheduling"]:
+ if self.module.check_mode and not oem_network_attributes:
+ self.module.exit_json(msg=CHANGES_FOUND_MSG, changed=True)
+ if not self.module.check_mode:
+ delete_job(self.idrac, job_id)
+ if self.module.check_mode and not oem_network_attributes:
+ self.module.exit_json(msg=CHANGES_FOUND_MSG, changed=True)
+ time.sleep(5)
+ settings_uri_resp = get_dynamic_uri(self.idrac, settings_uri)
+ pending_attributes = settings_uri_resp.get('Attributes')
+ if pending_attributes and not self.module.check_mode:
+ self.idrac.invoke_request(
+ clear_pending_uri, "POST", data="{}", dump=False)
+ if not oem_network_attributes:
+ self.module.exit_json(
+ msg=SUCCESS_CLEAR_PENDING_ATTR_MSG, changed=True)
+
+ def perform_operation(self):
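+ """
+ Apply the OEM network attributes: on iDRAC firmware older than 3.0 the attributes are imported through a
+ Server Configuration Profile, otherwise they are patched to the DellNetworkAttributes settings URI with the
+ requested apply time. Returns the response, any invalid attributes, and the effective job_wait flag.
+ """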
+ oem_network_attributes = self.module.params.get(
+ 'oem_network_attributes')
+ network_device_function_id = self.module.params.get(
+ 'network_device_function_id')
+ apply_time = self.module.params.get('apply_time')
+ job_wait = self.module.params.get('job_wait')
+ invalid_attr = {}
+ firm_ver = get_idrac_firmware_version(self.idrac)
+ if LooseVersion(firm_ver) < '3.0':
+ root = """<SystemConfiguration>{0}</SystemConfiguration>"""
+ scp_payload = root.format(xml_data_conversion(
+ oem_network_attributes, network_device_function_id))
+ resp = self.idrac.import_scp(
+ import_buffer=scp_payload, target="NIC", job_wait=False)
+ else:
+ payload = {'Attributes': oem_network_attributes}
+ apply_time_setting = self.apply_time(self.oem_uri)
+ if apply_time_setting:
+ payload.update(
+ {"@Redfish.SettingsApplyTime": apply_time_setting})
+ patch_uri = get_dynamic_uri(self.idrac, self.oem_uri).get(
+ '@Redfish.Settings').get('SettingsObject').get('@odata.id')
+ resp = self.idrac.invoke_request(
+ method='PATCH', uri=patch_uri, data=payload)
+ job_wait = job_wait if apply_time == "Immediate" else False
+ invalid_attr = self.extract_error_msg(resp)
+ return resp, invalid_attr, job_wait
+
+
+class NetworkAttributes(IDRACNetworkAttributes):
+ def __init__(self, idrac, module):
+ super().__init__(idrac, module)
+
+ def perform_operation(self):
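+ """
+ Patch the Ethernet, iSCSIBoot, and FibreChannel attributes to the device function settings URI with the
+ requested apply time. Returns the response, any invalid attributes, and the effective job_wait flag.
+ """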
+ updatable_fields = ['Ethernet', 'iSCSIBoot', 'FibreChannel']
+ network_attributes = self.module.params.get('network_attributes')
+ apply_time = self.module.params.get('apply_time')
+ job_wait = self.module.params.get('job_wait')
+ payload, invalid_attr = {}, {}
+ for each_attr in network_attributes:
+ if each_attr in updatable_fields:
+ payload.update({each_attr: network_attributes[each_attr]})
+ apply_time_setting = self.apply_time(self.redfish_uri)
+ if apply_time_setting:
+ payload.update({"@Redfish.SettingsApplyTime": apply_time_setting})
+ resp = get_dynamic_uri(self.idrac, self.redfish_uri)
+ patch_uri = resp.get(
+ "@Redfish.Settings", {}).get("SettingsObject", {}).get("@odata.id", {})
+ resp = self.idrac.invoke_request(
+ method='PATCH', uri=patch_uri, data=payload)
+ invalid_attr = self.extract_error_msg(resp)
+ job_wait = job_wait if apply_time == "Immediate" else False
+ return resp, invalid_attr, job_wait
+
+
+def perform_operation_for_main(idrac, module, obj, diff, _invalid_attr):
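+ """
+ Apply the requested attribute changes: reports changes in check mode, otherwise performs the operation,
+ optionally tracks the resulting job until completion or timeout, and exits the module with the final
+ status, the job details, and any invalid attributes.
+ """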
+ job_wait_timeout = module.params.get('job_wait_timeout')
+ if diff:
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_FOUND_MSG, changed=True,
+ invalid_attributes=_invalid_attr)
+ else:
+ job_resp, invalid_attr, job_wait = obj.perform_operation()
+ job_dict = {}
+ if (job_tracking_uri := job_resp.headers.get("Location")):
+ job_id = job_tracking_uri.split("/")[-1]
+ job_uri = iDRAC_JOB_URI.format(job_id=job_id)
+ if job_wait:
+ job_failed, msg, job_dict, wait_time = idrac_redfish_job_tracking(idrac, job_uri,
+ max_job_wait_sec=job_wait_timeout,
+ sleep_interval_secs=1)
+ job_dict = remove_key(job_dict,
+ regex_pattern='(.*?)@odata')
+ if int(wait_time) >= int(job_wait_timeout):
+ module.exit_json(msg=WAIT_TIMEOUT_MSG.format(
+ job_wait_timeout), changed=True, job_status=job_dict)
+ if job_failed:
+ module.fail_json(
+ msg=job_dict.get("Message"), invalid_attributes=invalid_attr, job_status=job_dict)
+ else:
+ job_resp = idrac.invoke_request(job_uri, 'GET')
+ job_dict = job_resp.json_data
+ job_dict = remove_key(job_dict,
+ regex_pattern='(.*?)@odata')
+
+ if job_dict.get('JobState') == "Completed":
+ firm_ver = get_idrac_firmware_version(idrac)
+ msg = SUCCESS_MSG if not invalid_attr else VALID_AND_INVALID_ATTR_MSG
+ if LooseVersion(firm_ver) < '3.0' and isinstance(obj, OEMNetworkAttributes):
+ message_id = job_dict.get("MessageId")
+ if message_id == "SYS053":
+ module.exit_json(msg=msg, changed=True, job_status=job_dict)
+ elif message_id == "SYS055":
+ module.exit_json(
+ msg=VALID_AND_INVALID_ATTR_MSG, changed=True, job_status=job_dict)
+ elif message_id == "SYS067":
+ module.fail_json(msg=INVALID_ATTR_MSG,
+ job_status=job_dict)
+ else:
+ module.fail_json(msg=job_dict.get("Message"))
+ else:
+ msg = SCHEDULE_MSG
+ module.exit_json(msg=msg, invalid_attributes=invalid_attr,
+ job_status=job_dict, changed=True)
+ else:
+ if module.check_mode:
+ module.exit_json(msg=NO_CHANGES_FOUND_MSG,
+ invalid_attributes=_invalid_attr)
+ # When the user has given only invalid attributes, diff will be 0 and _invalid_attr will contain entries.
+ elif _invalid_attr: # Expecting HTTP Error from server.
+ job_resp, invalid_attr, job_wait = obj.perform_operation()
+ module.exit_json(msg=NO_CHANGES_FOUND_MSG,
+ invalid_attributes=_invalid_attr)
+
+
+def main():
+ try:
+ specs = {
+ "network_adapter_id": {"type": 'str', "required": True},
+ "network_device_function_id": {"type": 'str', "required": True},
+ "network_attributes": {"type": 'dict'},
+ "oem_network_attributes": {"type": 'dict'},
+ "resource_id": {"type": 'str'},
+ "clear_pending": {"type": 'bool', "default": False},
+ "apply_time": {"type": 'str', "required": True,
+ "choices": ['Immediate', 'OnReset', 'AtMaintenanceWindowStart', 'InMaintenanceWindowOnReset']},
+ "maintenance_window": {"type": 'dict',
+ "options": {"start_time": {"type": 'str', "required": True},
+ "duration": {"type": 'int', "required": True}}},
+ "job_wait": {"type": "bool", "default": True},
+ "job_wait_timeout": {"type": "int", "default": 1200}
+ }
+ specs.update(idrac_auth_params)
+ module = AnsibleModule(argument_spec=specs,
+ mutually_exclusive=[
+ ('network_attributes', 'oem_network_attributes')],
+ required_if=[["apply_time", "AtMaintenanceWindowStart", ("maintenance_window",)],
+ ["apply_time", "InMaintenanceWindowOnReset", ("maintenance_window",)]],
+ supports_check_mode=True)
+ with iDRACRedfishAPI(module.params, req_session=True) as idrac:
+ if module_attribute := module.params.get('network_attributes'):
+ network_attr_obj = NetworkAttributes(idrac, module)
+ else:
+ module_attribute = module.params.get('oem_network_attributes')
+ network_attr_obj = OEMNetworkAttributes(idrac, module)
+ network_attr_obj.set_dynamic_base_uri_and_validate_ids()
+ network_attr_obj.validate_job_timeout()
+ if module.params.get('clear_pending') and 'clear_pending' in dir(network_attr_obj):
+ network_attr_obj.clear_pending()
+ server_reg = network_attr_obj.get_current_server_registry()
+ diff, invalid_attr = network_attr_obj.get_diff_between_current_and_module_input(
+ module_attribute, server_reg)
+ perform_operation_for_main(idrac,
+ module, network_attr_obj, diff, invalid_attr)
+ except HTTPError as err:
+ filter_err = remove_key(json.load(err), regex_pattern='(.*?)@odata')
+ module.exit_json(msg=str(err), error_info=filter_err, failed=True)
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError) as err:
+ module.exit_json(msg=str(err), failed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py
index 797534e39..f07d16868 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -22,7 +22,7 @@ extends_documentation_fragment:
- dellemc.openmanage.idrac_auth_options
options:
share_name:
- required: True
+ required: true
description: CIFS or NFS Network share.
type: str
share_user:
@@ -34,7 +34,7 @@ options:
type: str
aliases: ['share_pwd']
iso_image:
- required: True
+ required: true
description: Network ISO name.
type: str
expose_duration:
@@ -44,12 +44,13 @@ options:
default: 1080
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Jagadeesh N V (@jagadeeshnv)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module does not support C(check_mode).
'''
@@ -62,7 +63,7 @@ EXAMPLES = r'''
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "192.168.0.0:/nfsfileshare"
- iso_image: "unattended_os_image.iso"
+ iso_image: "unattended_os_image.iso"
expose_duration: 180
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py
index a506e5ce2..40cc5768d 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py
@@ -3,8 +3,8 @@
#
# Dell OpenManage Ansible Modules
-# Version 6.3.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 8.1.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -50,10 +50,10 @@ options:
- C(ChangePDStateToOnline) - To set the disk status to online. I(target) is required for this operation.
- C(ChangePDStateToOffline) - To set the disk status to offline. I(target) is required for this operation.
- C(LockVirtualDisk) - To encrypt the virtual disk. I(volume_id) is required for this operation.
+ - C(OnlineCapacityExpansion) - To expand the size of the virtual disk. I(volume_id) and either I(target) or I(size) are required for this operation.
choices: [ResetConfig, AssignSpare, SetControllerKey, RemoveControllerKey, ReKey, UnassignSpare,
EnableControllerEncryption, BlinkTarget, UnBlinkTarget, ConvertToRAID, ConvertToNonRAID,
- ChangePDStateToOnline, ChangePDStateToOffline, LockVirtualDisk]
- default: AssignSpare
+ ChangePDStateToOnline, ChangePDStateToOffline, LockVirtualDisk, OnlineCapacityExpansion]
type: str
target:
description:
@@ -62,6 +62,7 @@ options:
C(ChangePDStateToOnline), C(ChangePDStateToOffline), C(ConvertToRAID), or C(ConvertToNonRAID).
- If I(volume_id) is not specified or empty, this physical drive will be
assigned as a global hot spare when I(command) is C(AssignSpare).
+ - When I(command) is C(OnlineCapacityExpansion), then I(target) is mutually exclusive with I(size).
- "Notes: Global or Dedicated hot spare can be assigned only once for a physical disk,
Re-assign cannot be done when I(command) is C(AssignSpare)."
type: list
@@ -81,6 +82,7 @@ options:
- Fully Qualified Device Descriptor (FQDD) of the storage controller. For example-'RAID.Slot.1-1'.
- This option is mandatory when I(command) is C(ResetConfig), C(SetControllerKey),
C(RemoveControllerKey), C(ReKey), or C(EnableControllerEncryption).
+ - This option is mandatory for I(attributes).
type: str
key:
description:
@@ -115,26 +117,78 @@ options:
choices: [LKM, SEKM]
default: LKM
type: str
+ size:
+ description:
+ - Capacity of the virtual disk to be expanded in MB.
+ - Check mode and Idempotency is not supported for I(size).
+ - The Online Capacity Expansion size must exceed the current size by more than 100 MB.
+ - When I(command) is C(OnlineCapacityExpansion), then I(size) is mutually exclusive with I(target).
+ type: int
+ attributes:
+ type: dict
+ description:
+ - Dictionary of controller attribute and value pairs.
+ - This feature is only supported for iDRAC9 with firmware version 6.00.00.00 and above.
+ - I(controller_id) is required for this operation.
+ - I(apply_time) and I(maintenance_window) are applicable for I(attributes).
+ - I(attributes) is mutually exclusive with I(command).
+ - Use U(https://I(idrac_ip)/redfish/v1/Schemas/DellOemStorageController.json) to view the attributes.
+ apply_time:
+ type: str
+ description:
+ - Apply time of the I(attributes).
+ - This is applicable only to I(attributes).
+ - "C(Immediate) Allows the user to immediately reboot the host and apply the changes. I(job_wait)
+ is applicable."
+ - C(OnReset) Allows the user to apply the changes on the next reboot of the host server.
+ - "C(AtMaintenanceWindowStart) Allows the user to apply at the start of a maintenance window as specified
+ in I(maintenance_window)."
+ - "C(InMaintenanceWindowOnReset) Allows to apply after a manual reset but within the maintenance window as
+ specified in I(maintenance_window)."
+ choices: [Immediate, OnReset, AtMaintenanceWindowStart, InMaintenanceWindowOnReset]
+ default: Immediate
+ maintenance_window:
+ type: dict
+ description:
+ - Option to schedule the maintenance window.
+ - This is required when I(apply_time) is C(AtMaintenanceWindowStart) or C(InMaintenanceWindowOnReset).
+ suboptions:
+ start_time:
+ type: str
+ description:
+ - The start time for the maintenance window to be scheduled.
+ - "The format is YYYY-MM-DDThh:mm:ss<offset>"
+ - "<offset> is the time offset from UTC that the current timezone set in
+ iDRAC in the format: +05:30 for IST."
+ required: true
+ duration:
+ type: int
+ description:
+ - The duration in seconds for the maintenance window.
+ default: 900
job_wait:
description:
- Provides the option if the module has to wait for the job to be completed.
+ - This is applicable for I(attributes) when I(apply_time) is C(Immediate).
type: bool
- default: False
+ default: false
job_wait_timeout:
description:
- The maximum wait time of job completion in seconds before the job tracking is stopped.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
type: int
default: 120
requirements:
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Jagadeesh N V (@jagadeeshnv)"
- "Felix Stephen (@felixs88)"
- "Husniya Hameed (@husniya_hameed)"
+ - "Abhishek Sinha (@Abhishek-Dell)"
notes:
- Run this module from a system that has direct access to Dell iDRAC.
- - This module always reports as changes found when C(ReKey), C(BlinkTarget), and C(UnBlinkTarget).
+ - This module is supported on iDRAC9.
+ - This module always reports as changes found when I(command) is C(ReKey), C(BlinkTarget), and C(UnBlinkTarget).
- This module supports C(check_mode).
'''
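Editor's note: the I(attributes), I(apply_time), and I(maintenance_window) options documented above all feed a single Redfish PATCH to the controller Settings URI. The sketch below is illustrative only (the values are made up); it mirrors the payload shape assembled by the apply_attributes() helper added later in this diff.

# Illustrative payload, mirroring apply_attributes() further down in this diff.
pending = {"CheckConsistencyMode": "Normal", "CopybackMode": "Off"}   # attributes that differ from the current values
payload = {"Oem": {"Dell": {"DellStorageController": pending}}}
payload["@Redfish.SettingsApplyTime"] = {
    "ApplyTime": "AtMaintenanceWindowStart",
    "MaintenanceWindowStartTime": "2022-09-30T05:15:40-05:00",        # must end with the iDRAC's local UTC offset
    "MaintenanceWindowDurationInSeconds": 1200,
}
# The PATCH target is SETTINGS_URI:
#   /redfish/v1/Systems/{system_id}/Storage/{controller_id}/Controllers/{controller_id}/Settings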
@@ -346,6 +400,60 @@ EXAMPLES = r'''
volume_id: "Disk.Virtual.0:RAID.SL.3-1"
tags:
- lock
+
+- name: Online Capacity Expansion of a volume using target
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "OnlineCapacityExpansion"
+ volume_id: "Disk.Virtual.0:RAID.Integrated.1-1"
+ target:
+ - "Disk.Bay.2:Enclosure.Internal.0-0:RAID.Integrated.1-1"
+ tags:
+ - oce_target
+
+- name: Online Capacity Expansion of a volume using size
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "OnlineCapacityExpansion"
+ volume_id: "Disk.Virtual.0:RAID.Integrated.1-1"
+ size: 362785
+ tags:
+ - oce_size
+
+- name: Set controller attributes.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ controller_id: "RAID.Slot.1-1"
+ attributes:
+ ControllerMode: "HBA"
+ apply_time: "OnReset"
+ tags:
+ - controller-attribute
+
+- name: Configure controller attributes at Maintenance window
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ controller_id: "RAID.Slot.1-1"
+ attributes:
+ CheckConsistencyMode: Normal
+ CopybackMode: "Off"
+ LoadBalanceMode: Disabled
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 1200
'''
RETURN = r'''
@@ -425,6 +533,10 @@ CONTROLLER_URI = "/redfish/v1/Dell/Systems/{system_id}/Storage/DellController/{c
VOLUME_URI = "/redfish/v1/Systems/{system_id}/Storage/{controller_id}/Volumes"
PD_URI = "/redfish/v1/Systems/System.Embedded.1/Storage/{controller_id}/Drives/{drive_id}"
JOB_URI_OEM = "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/Jobs/{job_id}"
+CONTROLLERS_URI = "/redfish/v1/Systems/{system_id}/Storage/{controller_id}/Controllers/{controller_id}"
+MANAGER_URI = "/redfish/v1/Managers/iDRAC.Embedded.1"
+SETTINGS_URI = "/redfish/v1/Systems/{system_id}/Storage/{controller_id}/Controllers/{controller_id}/Settings"
+OCE_MIN_PD_RAID_MAPPING = {'RAID0': 1, 'RAID5': 1, 'RAID6': 1, 'RAID10': 2}
JOB_SUBMISSION = "Successfully submitted the job that performs the '{0}' operation."
JOB_COMPLETION = "Successfully performed the '{0}' operation."
@@ -432,8 +544,23 @@ CHANGES_FOUND = "Changes found to be applied."
NO_CHANGES_FOUND = "No changes found to be applied."
TARGET_ERR_MSG = "The Fully Qualified Device Descriptor (FQDD) of the target {0} must be only one."
PD_ERROR_MSG = "Unable to locate the physical disk with the ID: {0}"
+VD_ERROR_MSG = "Unable to locate the virtual disk with the ID: {0}"
ENCRYPT_ERR_MSG = "The storage controller '{0}' does not support encryption."
PHYSICAL_DISK_ERR = "Volume is not encryption capable."
+OCE_RAID_TYPE_ERR = "Online Capacity Expansion is not supported for {0} virtual disks."
+OCE_SIZE_100MB = "Minimum Online Capacity Expansion size must be greater than 100 MB of the current size {0}."
+OCE_TARGET_EMPTY = "Provided list of targets is empty."
+OCE_TARGET_RAID1_ERR = "Cannot add more than two disks to RAID1 virtual disk."
+UNSUPPORTED_APPLY_TIME = "Apply time {0} is not supported."
+MAINTENANCE_OFFSET = "The maintenance time must be post-fixed with local offset to {0}."
+MAINTENANCE_TIME = "The specified maintenance time window occurs in the past, " \
+ "provide a future time to schedule the maintenance window."
+HBA_MODE = "Other attributes cannot be updated when ControllerMode is provided as input."
+INVALID_ATTRIBUTES = "The following attributes are invalid: {0}"
+CONTROLLER_ID_REQUIRED = "controller_id is required to perform this operation."
+JOB_COMPLETION_ATTRIBUTES = "Successfully applied the controller attributes."
+JOB_SUBMISSION_ATTRIBUTES = "Successfully submitted the job that configures the controller attributes."
+ERR_MSG = "Unable to configure the controller attribute(s) settings."
def check_id_exists(module, redfish_obj, key, item_id, uri):
@@ -441,9 +568,9 @@ def check_id_exists(module, redfish_obj, key, item_id, uri):
try:
resp = redfish_obj.invoke_request("GET", uri.format(system_id=SYSTEM_ID, controller_id=item_id))
if not resp.success:
- module.fail_json(msg=msg)
+ module.exit_json(msg=msg, failed=True)
except HTTPError as err:
- module.fail_json(msg=msg, error_info=json.load(err))
+ module.exit_json(msg=msg, error_info=json.load(err), failed=True)
def ctrl_key(module, redfish_obj):
@@ -626,7 +753,7 @@ def target_identify_pattern(module, redfish_obj):
def lock_virtual_disk(module, redfish_obj):
- volume, command = module.params.get("volume_id"), module.params["command"]
+ volume = module.params.get("volume_id")
resp, job_uri, job_id = None, None, None
controller_id = volume[0].split(":")[-1]
check_id_exists(module, redfish_obj, "controller_id", controller_id, CONTROLLER_URI)
@@ -660,6 +787,68 @@ def lock_virtual_disk(module, redfish_obj):
return resp, job_uri, job_id
+def online_capacity_expansion(module, redfish_obj):
+ payload = None
+ volume_id = module.params.get("volume_id")
+ target = module.params.get("target")
+ size = module.params.get("size")
+ if not isinstance(volume_id, list):
+ volume_id = [volume_id]
+ if len(volume_id) != 1:
+ module.exit_json(msg=TARGET_ERR_MSG.format("virtual drive"), failed=True)
+
+ controller_id = volume_id[0].split(":")[-1]
+ volume_uri = VOLUME_URI + "/{volume_id}"
+ try:
+ volume_resp = redfish_obj.invoke_request("GET", volume_uri.format(system_id=SYSTEM_ID,
+ controller_id=controller_id,
+ volume_id=volume_id[0]))
+ except HTTPError:
+ module.exit_json(msg=VD_ERROR_MSG.format(volume_id[0]), failed=True)
+
+ try:
+ raid_type = volume_resp.json_data.get("RAIDType")
+ if raid_type in ['RAID50', 'RAID60']:
+ module.exit_json(msg=OCE_RAID_TYPE_ERR.format(raid_type), failed=True)
+
+ if target is not None:
+ if not target:
+ module.exit_json(msg=OCE_TARGET_EMPTY, failed=True)
+
+ if raid_type == 'RAID1':
+ module.fail_json(msg=OCE_TARGET_RAID1_ERR)
+
+ current_pd = []
+ links = volume_resp.json_data.get("Links")
+ if links:
+ for disk in volume_resp.json_data.get("Links").get("Drives"):
+ drive = disk["@odata.id"].split('/')[-1]
+ current_pd.append(drive)
+ drives_to_add = [each_drive for each_drive in target if each_drive not in current_pd]
+ if module.check_mode and drives_to_add and len(drives_to_add) % OCE_MIN_PD_RAID_MAPPING[raid_type] == 0:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif len(drives_to_add) == 0 or len(drives_to_add) % OCE_MIN_PD_RAID_MAPPING[raid_type] != 0:
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ payload = {"TargetFQDD": volume_id[0], "PDArray": drives_to_add}
+
+ elif size:
+ vd_size = volume_resp.json_data.get("CapacityBytes")
+ vd_size_MB = vd_size // (1024 * 1024)
+ if (size - vd_size_MB) < 100:
+ module.exit_json(msg=OCE_SIZE_100MB.format(vd_size_MB), failed=True)
+ payload = {"TargetFQDD": volume_id[0], "Size": size}
+
+ resp = redfish_obj.invoke_request("POST", RAID_ACTION_URI.format(system_id=SYSTEM_ID,
+ action="OnlineCapacityExpansion"),
+ data=payload)
+ job_uri = resp.headers.get("Location")
+ job_id = job_uri.split("/")[-1]
+ return resp, job_uri, job_id
+ except HTTPError as err:
+ err = json.load(err).get("error").get("@Message.ExtendedInfo", [{}])[0].get("Message")
+ module.exit_json(msg=err, failed=True)
+
+
def validate_inputs(module):
module_params = module.params
command = module_params.get("command")
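Editor's note: a brief illustration of the checks performed by online_capacity_expansion() above. The I(size) path converts the volume's CapacityBytes to MB with integer division and requires the requested size to exceed the current size by at least 100 MB; the I(target) path only proceeds when the new disks form a non-zero multiple of OCE_MIN_PD_RAID_MAPPING for the volume's RAID level. The helper names oce_size_ok/oce_target_ok and the shortened disk names are hypothetical and used only for illustration.

# Sketch of the size and target validation used by online_capacity_expansion(); values are illustrative.
OCE_MIN_PD_RAID_MAPPING = {'RAID0': 1, 'RAID5': 1, 'RAID6': 1, 'RAID10': 2}

def oce_size_ok(capacity_bytes, requested_mb):
    current_mb = capacity_bytes // (1024 * 1024)
    return (requested_mb - current_mb) >= 100        # must grow by at least 100 MB

def oce_target_ok(raid_type, current_pds, requested_pds):
    drives_to_add = [pd for pd in requested_pds if pd not in current_pds]
    return bool(drives_to_add) and len(drives_to_add) % OCE_MIN_PD_RAID_MAPPING[raid_type] == 0

oce_size_ok(380550 * 1024 * 1024, 380700)                     # True: 150 MB larger than the current size
oce_target_ok('RAID10', ['PD.0', 'PD.1'], ['PD.2', 'PD.3'])   # True: RAID10 disks are added in pairs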
@@ -689,13 +878,113 @@ def validate_inputs(module):
module.fail_json(msg=TARGET_ERR_MSG.format("physical disk"))
+def get_current_time(redfish_obj):
+ try:
+ resp = redfish_obj.invoke_request("GET", MANAGER_URI)
+ curr_time = resp.json_data.get("DateTime")
+ date_offset = resp.json_data.get("DateTimeLocalOffset")
+ except Exception:
+ return None, None
+ return curr_time, date_offset
+
+
+def validate_time(module, redfish_obj, mtime):
+ curr_time, date_offset = get_current_time(redfish_obj)
+ if not mtime.endswith(date_offset):
+ module.exit_json(failed=True, status_msg=MAINTENANCE_OFFSET.format(date_offset))
+ if mtime < curr_time:
+ module.exit_json(failed=True, status_msg=MAINTENANCE_TIME)
+
+
+def get_attributes(module, redfish_obj):
+ resp_data = {}
+ controller_id = module.params["controller_id"]
+ try:
+ resp = redfish_obj.invoke_request("GET", CONTROLLERS_URI.format(system_id=SYSTEM_ID,
+ controller_id=controller_id))
+ resp_data = resp.json_data
+ except HTTPError:
+ resp_data = {}
+ return resp_data
+
+
+def check_attr_exists(module, curr_attr, inp_attr):
+ invalid_attr = []
+ pending_attr = {}
+ diff = 0
+ for each in inp_attr:
+ if each not in curr_attr.keys():
+ invalid_attr.append(each)
+ elif curr_attr[each] != inp_attr[each]:
+ diff = 1
+ pending_attr[each] = inp_attr[each]
+ if invalid_attr:
+ module.exit_json(msg=INVALID_ATTRIBUTES.format(invalid_attr), failed=True)
+ if diff and module.check_mode:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif not diff:
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ return pending_attr
+
+
+def get_redfish_apply_time(module, redfish_obj, apply_time, time_settings):
+ time_set = {}
+ if time_settings:
+ if 'Maintenance' in apply_time:
+ if apply_time not in time_settings:
+ module.exit_json(failed=True, status_msg=UNSUPPORTED_APPLY_TIME.format(apply_time))
+ else:
+ time_set['ApplyTime'] = apply_time
+ m_win = module.params.get('maintenance_window')
+ validate_time(module, redfish_obj, m_win.get('start_time'))
+ time_set['MaintenanceWindowStartTime'] = m_win.get('start_time')
+ time_set['MaintenanceWindowDurationInSeconds'] = m_win.get('duration')
+ else:
+ time_set['ApplyTime'] = apply_time
+ return time_set
+
+
+def apply_attributes(module, redfish_obj, pending, time_settings):
+ payload = {"Oem": {"Dell": {"DellStorageController": pending}}}
+ apply_time = module.params.get('apply_time')
+ time_set = get_redfish_apply_time(module, redfish_obj, apply_time, time_settings)
+ if time_set:
+ payload["@Redfish.SettingsApplyTime"] = time_set
+ try:
+ resp = redfish_obj.invoke_request("PATCH", SETTINGS_URI.format(system_id=SYSTEM_ID,
+ controller_id=module.params["controller_id"]),
+ data=payload)
+ if resp.status_code == 202 and "error" in resp.json_data:
+ msg_err_id = resp.json_data.get("error").get("@Message.ExtendedInfo", [{}])[0].get("MessageId")
+ if "Created" not in msg_err_id:
+ module.exit_json(msg=ERR_MSG, error_info=resp.json_data, failed=True)
+ except HTTPError as err:
+ err = json.load(err).get("error")
+ module.exit_json(msg=ERR_MSG, error_info=err, failed=True)
+ job_id = resp.headers["Location"].split("/")[-1]
+ return job_id, time_set
+
+
+def set_attributes(module, redfish_obj):
+ resp_data = get_attributes(module, redfish_obj)
+ curr_attr = resp_data.get("Oem").get("Dell").get("DellStorageController")
+ inp_attr = module.params.get("attributes")
+ if inp_attr.get("ControllerMode") and len(inp_attr.keys()) > 1:
+ module.exit_json(msg=HBA_MODE, failed=True)
+ pending = check_attr_exists(module, curr_attr, inp_attr)
+ time_settings = resp_data.get("@Redfish.Settings", {}).get("SupportedApplyTimes", [])
+ job_id, time_set = apply_attributes(module, redfish_obj, pending, time_settings)
+ return job_id, time_set
+
+
def main():
specs = {
- "command": {"required": False, "default": "AssignSpare",
+ "attributes": {"type": 'dict'},
+ "command": {"required": False,
"choices": ["ResetConfig", "AssignSpare", "SetControllerKey", "RemoveControllerKey",
"ReKey", "UnassignSpare", "EnableControllerEncryption", "BlinkTarget",
"UnBlinkTarget", "ConvertToRAID", "ConvertToNonRAID", "ChangePDStateToOnline",
- "ChangePDStateToOffline", "LockVirtualDisk"]},
+ "ChangePDStateToOffline", "LockVirtualDisk", "OnlineCapacityExpansion"]},
"controller_id": {"required": False, "type": "str"},
"volume_id": {"required": False, "type": "list", "elements": "str"},
"target": {"required": False, "type": "list", "elements": "str", "aliases": ["drive_id"]},
@@ -703,12 +992,20 @@ def main():
"key_id": {"required": False, "type": "str"},
"old_key": {"required": False, "type": "str", "no_log": True},
"mode": {"required": False, "choices": ["LKM", "SEKM"], "default": "LKM"},
+ "apply_time": {"type": 'str', "default": 'Immediate',
+ "choices": ['Immediate', 'OnReset', 'AtMaintenanceWindowStart', 'InMaintenanceWindowOnReset']},
+ "maintenance_window": {"type": 'dict',
+ "options": {"start_time": {"type": 'str', "required": True},
+ "duration": {"type": 'int', "required": False, "default": 900}}},
"job_wait": {"required": False, "type": "bool", "default": False},
- "job_wait_timeout": {"required": False, "type": "int", "default": 120}
+ "job_wait_timeout": {"required": False, "type": "int", "default": 120},
+ "size": {"required": False, "type": "int"}
}
specs.update(redfish_auth_params)
module = AnsibleModule(
argument_spec=specs,
+ mutually_exclusive=[('attributes', 'command'), ("target", "size")],
+ required_one_of=[('attributes', 'command')],
required_if=[
["command", "SetControllerKey", ["controller_id", "key", "key_id"]],
["command", "ReKey", ["controller_id", "mode"]], ["command", "ResetConfig", ["controller_id"]],
@@ -718,10 +1015,15 @@ def main():
["command", "UnBlinkTarget", ["target", "volume_id"], True], ["command", "ConvertToRAID", ["target"]],
["command", "ConvertToNonRAID", ["target"]], ["command", "ChangePDStateToOnline", ["target"]],
["command", "ChangePDStateToOffline", ["target"]],
- ["command", "LockVirtualDisk", ["volume_id"]]
+ ["command", "LockVirtualDisk", ["volume_id"]], ["command", "OnlineCapacityExpansion", ["volume_id"]],
+ ["command", "OnlineCapacityExpansion", ["target", "size"], True],
+ ["command", "LockVirtualDisk", ["volume_id"]],
+ ["apply_time", "AtMaintenanceWindowStart", ("maintenance_window",)],
+ ["apply_time", "InMaintenanceWindowOnReset", ("maintenance_window",)]
],
supports_check_mode=True)
- validate_inputs(module)
+ if not bool(module.params["attributes"]):
+ validate_inputs(module)
try:
command = module.params["command"]
with Redfish(module.params, req_session=True) as redfish_obj:
@@ -742,6 +1044,33 @@ def main():
resp, job_uri, job_id = change_pd_status(module, redfish_obj)
elif command == "LockVirtualDisk":
resp, job_uri, job_id = lock_virtual_disk(module, redfish_obj)
+ elif command == "OnlineCapacityExpansion":
+ resp, job_uri, job_id = online_capacity_expansion(module, redfish_obj)
+
+ if module.params["attributes"]:
+ controller_id = module.params["controller_id"]
+ if controller_id is None:
+ module.exit_json(msg=CONTROLLER_ID_REQUIRED, failed=True)
+ check_id_exists(module, redfish_obj, "controller_id", controller_id, CONTROLLER_URI)
+ job_id, time_set = set_attributes(module, redfish_obj)
+ job_uri = JOB_URI_OEM.format(job_id=job_id)
+ if time_set["ApplyTime"] == "Immediate" and module.params["job_wait"]:
+ resp, msg = wait_for_job_completion(redfish_obj, job_uri, job_wait=module.params["job_wait"],
+ wait_timeout=module.params["job_wait_timeout"])
+ job_data = strip_substr_dict(resp.json_data)
+ if job_data["JobState"] == "Failed":
+ changed, failed = False, True
+ else:
+ changed, failed = True, False
+ module.exit_json(msg=JOB_COMPLETION_ATTRIBUTES, task={"id": job_id, "uri": job_uri},
+ status=job_data, changed=changed, failed=failed)
+ else:
+ resp, msg = wait_for_job_completion(redfish_obj, job_uri, job_wait=False,
+ wait_timeout=module.params["job_wait_timeout"])
+ job_data = strip_substr_dict(resp.json_data)
+ module.exit_json(msg=JOB_SUBMISSION_ATTRIBUTES, task={"id": job_id, "uri": job_uri},
+ status=job_data)
+
oem_job_url = JOB_URI_OEM.format(job_id=job_id)
job_wait = module.params["job_wait"]
if job_wait:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py
index 8de5ffc9f..2c28c9a5f 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -27,12 +27,13 @@ extends_documentation_fragment:
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Anooja Vardhineni (@anooja-vardhineni)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py
index 67a02c12e..bd7fe2c67 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.4.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -20,7 +20,7 @@ short_description: Export or Import iDRAC Server Configuration Profile (SCP)
version_added: "2.1.0"
description:
- Export the Server Configuration Profile (SCP) from the iDRAC or import from a
- network share (CIFS, NFS, HTTP, HTTPS) or a local file.
+ network share (CIFS, NFS, HTTP, HTTPS) or a local path.
extends_documentation_fragment:
- dellemc.openmanage.idrac_auth_options
options:
@@ -35,13 +35,13 @@ options:
job_wait:
description: Whether to wait for job completion or not.
type: bool
- required: True
+ required: true
share_name:
description:
- Network share or local path.
- CIFS, NFS, HTTP, and HTTPS network share types are supported.
+ - I(share_name) is mutually exclusive with I(import_buffer).
type: str
- required: True
share_user:
description: Network share user in the format 'user@domain' or 'domain\\user' if user is
part of a domain else 'user'. This option is mandatory for CIFS Network Share.
@@ -59,14 +59,27 @@ options:
type: str
scp_components:
description:
- - If C(ALL), this module exports or imports all components configurations from SCP file.
- - If C(IDRAC), this module exports or imports iDRAC configuration from SCP file.
- - If C(BIOS), this module exports or imports BIOS configuration from SCP file.
- - If C(NIC), this module exports or imports NIC configuration from SCP file.
- - If C(RAID), this module exports or imports RAID configuration from SCP file.
- type: str
- choices: ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID']
+ - If C(ALL), this option exports or imports all components configurations from the SCP file.
+ - If C(IDRAC), this option exports or imports iDRAC configuration from the SCP file.
+ - If C(BIOS), this option exports or imports BIOS configuration from the SCP file.
+ - If C(NIC), this option exports or imports NIC configuration from the SCP file.
+ - If C(RAID), this option exports or imports RAID configuration from the SCP file.
+ - If C(FC), this option exports or imports Fibre Channel configuration from the SCP file.
+ - If C(InfiniBand), this option exports or imports InfiniBand configuration from the SCP file.
+ - If C(SupportAssist), this option exports or imports SupportAssist configuration from the SCP file.
+ - If C(EventFilters), this option exports or imports EventFilters configuration from the SCP file.
+ - If C(System), this option exports or imports System configuration from the SCP file.
+ - If C(LifecycleController), this option exports or imports Lifecycle Controller configuration from the SCP file.
+ - If C(AHCI), this option exports or imports AHCI configuration from the SCP file.
+ - If C(PCIeSSD), this option exports or imports PCIeSSD configuration from the SCP file.
+ - When I(command) is C(export) or C(import), I(target) with multiple components is supported only
+ on iDRAC9 with firmware 6.10.00.00 and above.
+ type: list
+ choices: ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID', 'FC', 'InfiniBand', 'SupportAssist',
+ 'EventFilters', 'System', 'LifecycleController', 'AHCI', 'PCIeSSD']
default: 'ALL'
+ elements: str
+ aliases: ['target']
shutdown_type:
description:
- This option is applicable for C(import) command.
@@ -90,22 +103,105 @@ options:
choices: ['JSON', 'XML']
default: 'XML'
export_use:
- description: Specify the type of server configuration profile (SCP) to be exported.
- This option is applicable for C(export) command.
+ description:
+ - Specify the type of Server Configuration Profile (SCP) to be exported.
+ - This option is applicable when I(command) is C(export).
+ - C(Default) Creates a non-destructive snapshot of the configuration.
+ - C(Replace) Replaces a server with another or restores the server's settings to a known baseline.
+ - C(Clone) Clones settings from one server to another server with an identical hardware setup.
+ All settings except the I/O identity are updated (for example, RAID settings are reset). The settings in this export
+ are destructive when uploaded to another system.
type: str
choices: ['Default', 'Clone', 'Replace']
default: 'Default'
+ version_added: 7.3.0
+ ignore_certificate_warning:
+ description:
+ - If C(ignore), it ignores the certificate warnings.
+ - If C(showerror), it shows the certificate warnings.
+ - I(ignore_certificate_warning) is considered only when I(share_name) is of type HTTPS and is
+ supported only on iDRAC9.
+ type: str
+ choices: [ignore, showerror]
+ default: ignore
+ version_added: 7.3.0
+ include_in_export:
+ description:
+ - This option is applicable when I(command) is C(export).
+ - If C(default), it exports the default Server Configuration Profile.
+ - If C(readonly), it exports the SCP with readonly attributes.
+ - If C(passwordhashvalues), it exports the SCP with password hash values.
+ - If C(customtelemetry), it exports the SCP with custom telemetry attributes. This is supported only on iDRAC9.
+ type: str
+ choices: [default, readonly, passwordhashvalues, customtelemetry]
+ default: default
+ version_added: 7.3.0
+ import_buffer:
+ description:
+ - Used to import the XML or JSON buffer input into the iDRAC.
+ - This option is applicable when I(command) is C(import) or C(preview).
+ - I(import_buffer) is mutually exclusive with I(share_name).
+ type: str
+ version_added: 7.3.0
+ proxy_support:
+ description:
+ - Proxy to be enabled or disabled.
+ - I(proxy_support) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: bool
+ default: false
+ version_added: 7.3.0
+ proxy_type:
+ description:
+ - C(http) to select HTTP type proxy.
+ - C(socks4) to select SOCKS4 type proxy.
+ - I(proxy_type) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: str
+ choices: [http, socks4]
+ default: http
+ version_added: 7.3.0
+ proxy_server:
+ description:
+ - I(proxy_server) is required when I(share_name) is of type HTTPS or HTTP and I(proxy_support) is C(true).
+ - I(proxy_server) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: str
+ version_added: 7.3.0
+ proxy_port:
+ description:
+ - The proxy port used for authentication.
+ - I(proxy_port) is required when I(share_name) is of type HTTPS or HTTP and I(proxy_support) is C(true).
+ - I(proxy_port) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: str
+ default: "80"
+ version_added: 7.3.0
+ proxy_username:
+ description:
+ - The proxy username used for authentication.
+ - I(proxy_username) is considered only when I(share_name) is of type HTTP or HTTPS
+ and is supported only on iDRAC9.
+ type: str
+ version_added: 7.3.0
+ proxy_password:
+ description:
+ - The proxy password used for authentication.
+ - I(proxy_password) is considered only when I(share_name) is of type HTTP or HTTPS
+ and is supported only on iDRAC9.
+ type: str
+ version_added: 7.3.0
requirements:
- - "python >= 3.8.6"
+ - "python >= 3.9.14"
author:
- "Jagadeesh N V(@jagadeeshnv)"
- "Felix Stephen (@felixs88)"
+ - "Jennifer John (@Jennifer-John)"
+ - "Shivam Sharma (@ShivamSh3)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
- This module supports C(check_mode).
- - To import Server Configuration Profile (SCP) on the iDRAC7 and iDRAC8-based servers,
+ - To import Server Configuration Profile (SCP) on the iDRAC8-based servers,
the servers must have iDRAC Enterprise license or later.
+ - For the C(import) operation, C(check_mode) is supported only when I(target) is C(ALL).
+ - This module supports IPv4 and IPv6 addresses.
'''
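Editor's note: because I(scp_components) is now a list, the module joins the selected components into the comma-separated target string that the iDRAC SCP APIs expect and rejects C(ALL) mixed with other components. The sketch below is illustrative; build_scp_target is a hypothetical helper, and the real logic lives in validate_input() and the ",".join(...) calls later in this diff.

# Illustrative sketch of the scp_components handling added later in this diff.
def build_scp_target(scp_components):
    if len(scp_components) != 1 and "ALL" in scp_components:
        raise ValueError("ALL cannot be combined with other components.")
    return ",".join(scp_components)

build_scp_target(["IDRAC", "BIOS"])   # "IDRAC,BIOS"
build_scp_target(["ALL"])             # "ALL"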
EXAMPLES = r'''
@@ -117,11 +213,12 @@ EXAMPLES = r'''
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "/scp_folder"
- scp_components: IDRAC
+ scp_components:
+ - IDRAC
scp_file: example_file
export_format: JSON
export_use: Clone
- job_wait: True
+ job_wait: true
- name: Import SCP with IDRAC components in JSON format from a local path
dellemc.openmanage.idrac_server_config_profile:
@@ -131,11 +228,12 @@ EXAMPLES = r'''
ca_path: "/path/to/ca_cert.pem"
share_name: "/scp_folder"
command: import
- scp_components: "IDRAC"
+ scp_components:
+ - IDRAC
scp_file: example_file.json
shutdown_type: Graceful
end_host_power_state: "On"
- job_wait: False
+ job_wait: false
- name: Export SCP with BIOS components in XML format to a NFS share path with auto-generated file name
dellemc.openmanage.idrac_server_config_profile:
@@ -144,10 +242,11 @@ EXAMPLES = r'''
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "192.168.0.2:/share"
- scp_components: "BIOS"
+ scp_components:
+ - BIOS
export_format: XML
export_use: Default
- job_wait: True
+ job_wait: true
- name: Import SCP with BIOS components in XML format from a NFS share path
dellemc.openmanage.idrac_server_config_profile:
@@ -157,11 +256,12 @@ EXAMPLES = r'''
ca_path: "/path/to/ca_cert.pem"
share_name: "192.168.0.2:/share"
command: import
- scp_components: "BIOS"
+ scp_components:
+ - BIOS
scp_file: 192.168.0.1_20210618_162856.xml
shutdown_type: NoReboot
end_host_power_state: "Off"
- job_wait: False
+ job_wait: false
- name: Export SCP with RAID components in XML format to a CIFS share path with share user domain name
dellemc.openmanage.idrac_server_config_profile:
@@ -172,12 +272,12 @@ EXAMPLES = r'''
share_name: "\\\\192.168.0.2\\share"
share_user: share_username@domain
share_password: share_password
- share_mnt: /mnt/cifs
scp_file: example_file.xml
- scp_components: "RAID"
+ scp_components:
+ - RAID
export_format: XML
export_use: Default
- job_wait: True
+ job_wait: true
- name: Import SCP with RAID components in XML format from a CIFS share path
dellemc.openmanage.idrac_server_config_profile:
@@ -188,13 +288,13 @@ EXAMPLES = r'''
share_name: "\\\\192.168.0.2\\share"
share_user: share_username
share_password: share_password
- share_mnt: /mnt/cifs
command: import
- scp_components: "RAID"
+ scp_components:
+ - RAID
scp_file: example_file.xml
shutdown_type: Forced
end_host_power_state: "On"
- job_wait: True
+ job_wait: true
- name: Export SCP with ALL components in JSON format to a HTTP share path
dellemc.openmanage.idrac_server_config_profile:
@@ -206,9 +306,10 @@ EXAMPLES = r'''
share_user: share_username
share_password: share_password
scp_file: example_file.json
- scp_components: ALL
+ scp_components:
+ - ALL
export_format: JSON
- job_wait: False
+ job_wait: false
- name: Import SCP with ALL components in JSON format from a HTTP share path
dellemc.openmanage.idrac_server_config_profile:
@@ -223,7 +324,7 @@ EXAMPLES = r'''
scp_file: example_file.json
shutdown_type: Graceful
end_host_power_state: "On"
- job_wait: True
+ job_wait: true
- name: Export SCP with ALL components in XML format to a HTTPS share path without SCP file name
dellemc.openmanage.idrac_server_config_profile:
@@ -234,10 +335,11 @@ EXAMPLES = r'''
share_name: "https://192.168.0.4/share"
share_user: share_username
share_password: share_password
- scp_components: ALL
+ scp_components:
+ - ALL
export_format: XML
export_use: Replace
- job_wait: True
+ job_wait: true
- name: Import SCP with ALL components in XML format from a HTTPS share path
dellemc.openmanage.idrac_server_config_profile:
@@ -252,9 +354,9 @@ EXAMPLES = r'''
scp_file: 192.168.0.1_20160618_164647.xml
shutdown_type: Graceful
end_host_power_state: "On"
- job_wait: False
+ job_wait: false
-- name: Preview SCP with ALL components in XML format from a CIFS share path
+- name: Preview SCP with IDRAC components in XML format from a CIFS share path
dellemc.openmanage.idrac_server_config_profile:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
@@ -264,11 +366,12 @@ EXAMPLES = r'''
share_user: share_username
share_password: share_password
command: preview
- scp_components: "ALL"
+ scp_components:
+ - ALL
scp_file: example_file.xml
- job_wait: True
+ job_wait: true
-- name: Preview SCP with ALL components in JSON format from a NFS share path
+- name: Preview SCP with IDRAC components in JSON format from a NFS share path
dellemc.openmanage.idrac_server_config_profile:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
@@ -276,11 +379,12 @@ EXAMPLES = r'''
ca_path: "/path/to/ca_cert.pem"
share_name: "192.168.0.2:/share"
command: preview
- scp_components: "IDRAC"
+ scp_components:
+ - IDRAC
scp_file: example_file.xml
- job_wait: True
+ job_wait: true
-- name: Preview SCP with ALL components in XML format from a HTTP share path
+- name: Preview SCP with IDRAC components in XML format from a HTTP share path
dellemc.openmanage.idrac_server_config_profile:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
@@ -290,11 +394,12 @@ EXAMPLES = r'''
share_user: share_username
share_password: share_password
command: preview
- scp_components: "ALL"
+ scp_components:
+ - ALL
scp_file: example_file.xml
- job_wait: True
+ job_wait: true
-- name: Preview SCP with ALL components in XML format from a local path
+- name: Preview SCP with IDRAC components in XML format from a local path
dellemc.openmanage.idrac_server_config_profile:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
@@ -302,9 +407,72 @@ EXAMPLES = r'''
ca_path: "/path/to/ca_cert.pem"
share_name: "/scp_folder"
command: preview
- scp_components: "IDRAC"
+ scp_components:
+ - IDRAC
scp_file: example_file.json
- job_wait: False
+ job_wait: false
+
+- name: Import SCP with IDRAC components in XML format from the XML content.
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: import
+ scp_components:
+ - IDRAC
+ job_wait: true
+ import_buffer: "<SystemConfiguration><Component FQDD='iDRAC.Embedded.1'><Attribute Name='IPMILan.1#Enable'>
+ Disabled</Attribute></Component></SystemConfiguration>"
+
+- name: Export SCP with ALL components in XML format using HTTP proxy.
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ scp_components:
+ - ALL
+ share_name: "http://192.168.0.1/http-share"
+ proxy_support: true
+ proxy_server: 192.168.0.5
+ proxy_port: 8080
+ proxy_username: proxy_username
+ proxy_password: proxy_password
+ proxy_type: http
+ include_in_export: passwordhashvalues
+ job_wait: true
+
+- name: Import SCP with IDRAC and BIOS components in XML format using SOCKS4 proxy
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: import
+ scp_components:
+ - IDRAC
+ - BIOS
+ share_name: "https://192.168.0.1/http-share"
+ proxy_support: true
+ proxy_server: 192.168.0.6
+ proxy_port: 8080
+ proxy_type: socks4
+ scp_file: filename.xml
+ job_wait: true
+
+- name: Import SCP with IDRAC components in JSON format from the JSON content.
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: import
+ scp_components:
+ - IDRAC
+ job_wait: true
+ import_buffer: "{\"SystemConfiguration\": {\"Components\": [{\"FQDD\": \"iDRAC.Embedded.1\",\"Attributes\":
+ [{\"Name\": \"SNMP.1#AgentCommunity\",\"Value\": \"public1\"}]}]}}"
'''
RETURN = r'''
@@ -357,13 +525,12 @@ error_info:
import os
import json
-import re
-import copy
from datetime import datetime
from os.path import exists
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
-from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import idrac_redfish_job_tracking, \
+ strip_substr_dict
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.parse import urlparse
@@ -374,6 +541,21 @@ CHANGES_FOUND = "Changes found to be applied."
NO_CHANGES_FOUND = "No changes found to be applied."
INVALID_FILE = "Invalid file path provided."
JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/Jobs/{job_id}"
+IGNORE_WARNING = {"ignore": "Enabled", "showerror": "Disabled"}
+IN_EXPORTS = {"default": "Default", "readonly": "IncludeReadOnly", "passwordhashvalues": "IncludePasswordHashValues",
+ "customtelemetry": "IncludeCustomTelemetry"}
+SCP_ALL_ERR_MSG = "The option ALL cannot be used with options IDRAC, BIOS, NIC, or RAID."
+MUTUALLY_EXCLUSIVE = "import_buffer is mutually exclusive with {0}."
+PROXY_ERR_MSG = "proxy_support is enabled but all of the following are missing: proxy_server"
+iDRAC_JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{job_id}"
+FAIL_MSG = "Failed to {0} scp."
+TARGET_INVALID_MSG = "Unable to {command} the {invalid_targets} from the SCP file\
+ because the values {invalid_targets} are invalid.\
+ The valid values are {valid_targets}. Enter the valid values and retry the operation."
+DOMAIN_LIST = ["\\", "@"]
+ERROR_CODES = ["SYS041", "SYS044", "SYS045", "SYS046", "SYS047", "SYS048", "SYS050", "SYS051", "SYS062",
+ "SYS063", "SYS064", "SYS065", "SYS067", "SYS068", "SYS070", "SYS071", "SYS072",
+ "SYS073", "SYS075", "SYS076", "SYS077", "SYS078", "SYS079", "SYS080"]
def get_scp_file_format(module):
@@ -394,7 +576,8 @@ def get_scp_file_format(module):
def response_format_change(response, params, file_name):
resp = {}
if params["job_wait"]:
- response = response.json_data
+ if hasattr(response, "json_data"):
+ response = response.json_data
response.pop("Description", None)
response.pop("Name", None)
response.pop("EndTime", None)
@@ -404,8 +587,7 @@ def response_format_change(response, params, file_name):
if response.get("Oem") is not None:
response.update(response["Oem"]["Dell"])
response.pop("Oem", None)
- sep = "/" if "/" in params["share_name"] else "\\"
- response["file"] = "{0}{1}{2}".format(params["share_name"], sep, file_name)
+ response = get_file(params, response, file_name)
response["retval"] = True
else:
location = response.headers.get("Location")
@@ -417,13 +599,41 @@ def response_format_change(response, params, file_name):
resp["Status"] = "Success"
resp["Message"] = "none"
resp["StatusCode"] = response.status_code
- sep = "/" if "/" in params["share_name"] else "\\"
- resp["file"] = "{0}{1}{2}".format(params["share_name"], sep, file_name)
+ resp = get_file(params, resp, file_name)
resp["retval"] = True
response = resp
return response
+def get_file(params, response, file_name):
+ if params.get("share_name") is not None:
+ sep = "/" if "/" in params.get("share_name") else "\\"
+ response["file"] = "{0}{1}{2}".format(params.get("share_name"), sep, file_name)
+ return response
+
+
+def get_proxy_share(module):
+ proxy_share = {}
+ proxy_support = module.params.get("proxy_support")
+ proxy_type = module.params["proxy_type"]
+ proxy_server = module.params.get("proxy_server")
+ proxy_port = module.params["proxy_port"]
+ proxy_username = module.params.get("proxy_username")
+ proxy_password = module.params.get("proxy_password")
+ if proxy_support is True and proxy_server is None:
+ module.fail_json(msg=PROXY_ERR_MSG)
+ if proxy_support is True:
+ proxy_share["proxy_server"] = proxy_server
+ proxy_share["proxy_username"] = proxy_username
+ proxy_share["proxy_password"] = proxy_password
+ proxy_share["proxy_port"] = proxy_port
+ proxy_share["proxy_type"] = proxy_type.upper()
+ proxy_share["proxy_support"] = "Enabled"
+ else:
+ proxy_share["proxy_support"] = "Disabled"
+ return proxy_share
+
+
def run_export_import_scp_http(idrac, module):
share_url = urlparse(module.params["share_name"])
share = {}
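Editor's note: when I(share_name) is an HTTP or HTTPS URL and I(proxy_support) is enabled, get_proxy_share() above folds the proxy options into the share dictionary before the export or import call. A rough sketch of the resulting keys, with purely illustrative values:

# Illustrative result of get_proxy_share() for an HTTP share with proxy_support enabled.
share = {"share_ip": "192.168.0.1", "share_name": "http-share", "share_type": "HTTP"}
proxy_share = {
    "proxy_support": "Enabled",
    "proxy_type": "HTTP",            # proxy_type is upper-cased before use
    "proxy_server": "192.168.0.5",   # required when proxy_support is true
    "proxy_port": "8080",
    "proxy_username": "proxy_username",
    "proxy_password": "proxy_password",
}
share.update(proxy_share)            # merged into the share passed to the export/import call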
@@ -435,40 +645,69 @@ def run_export_import_scp_http(idrac, module):
scp_file_name_format = scp_file
share["username"] = module.params.get("share_user")
share["password"] = module.params.get("share_password")
+ scp_target = ",".join(module.params["scp_components"])
command = module.params["command"]
+ if share["share_type"] == "HTTPS":
+ share["ignore_certificate_warning"] = IGNORE_WARNING[module.params["ignore_certificate_warning"]]
if command == "import":
- scp_response = idrac.import_scp_share(shutdown_type=module.params["shutdown_type"],
- host_powerstate=module.params["end_host_power_state"],
- job_wait=module.params["job_wait"],
- target=module.params["scp_components"], share=share, )
+ perform_check_mode(module, idrac)
+ if share["share_type"] in ["HTTP", "HTTPS"]:
+ proxy_share = get_proxy_share(module)
+ share.update(proxy_share)
+ idrac_import_scp_params = {
+ "target": scp_target, "share": share, "job_wait": module.params["job_wait"],
+ "host_powerstate": module.params["end_host_power_state"], "shutdown_type": module.params["shutdown_type"]
+ }
+ scp_response = idrac.import_scp_share(**idrac_import_scp_params)
+ scp_response = wait_for_job_tracking_redfish(module, idrac, scp_response)
elif command == "export":
scp_file_name_format = get_scp_file_format(module)
share["file_name"] = scp_file_name_format
+ include_in_export = IN_EXPORTS[module.params["include_in_export"]]
+ if share["share_type"] in ["HTTP", "HTTPS"]:
+ proxy_share = get_proxy_share(module)
+ share.update(proxy_share)
scp_response = idrac.export_scp(export_format=module.params["export_format"],
export_use=module.params["export_use"],
- target=module.params["scp_components"],
- job_wait=module.params["job_wait"], share=share, )
+ target=scp_target,
+ job_wait=False, share=share, # Hardcoding it as false because job tracking is done in idrac_redfish.py as well.
+ include_in_export=include_in_export)
+ scp_response = wait_for_job_tracking_redfish(
+ module, idrac, scp_response
+ )
scp_response = response_format_change(scp_response, module.params, scp_file_name_format)
- if isinstance(scp_response, dict) and scp_response.get("TaskStatus") == "Critical":
- module.fail_json(msg="Failed to {0} scp.".format(command), scp_status=scp_response)
+ exit_on_failure(module, scp_response, command)
return scp_response
+def perform_check_mode(module, idrac, http_share=True):
+ if module.check_mode:
+ module.params["job_wait"] = True
+ scp_resp = preview_scp_redfish(module, idrac, http_share, import_job_wait=True)
+ if "SYS081" in scp_resp["MessageId"] or "SYS082" in scp_resp["MessageId"]:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif "SYS069" in scp_resp["MessageId"]:
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ else:
+ module.fail_json(msg=scp_resp)
+
+
def get_scp_share_details(module):
share_name = module.params.get("share_name")
command = module.params["command"]
scp_file_name_format = get_scp_file_format(module)
- if ":" in share_name:
- nfs_split = share_name.split(":")
- share = {"share_ip": nfs_split[0], "share_name": nfs_split[1], "share_type": "NFS"}
+ if ":/" in share_name:
+ nfs_split = share_name.split(":/", 1)
+ share = {"share_ip": nfs_split[0], "share_name": "/{0}".format(nfs_split[1]), "share_type": "NFS"}
if command == "export":
share["file_name"] = scp_file_name_format
elif "\\" in share_name:
- ip_pattern = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
- share_path = re.split(ip_pattern, share_name)
- share_ip = re.findall(ip_pattern, share_name)
- share_path_name = "\\".join(list(filter(None, share_path[-1].split("\\"))))
- share = {"share_ip": share_ip[0], "share_name": share_path_name, "share_type": "CIFS",
+ cifs_share = share_name.split("\\", 3)
+ share_ip = cifs_share[2]
+ share_path_name = cifs_share[-1]
+ if not any(domain in module.params.get("share_user") for domain in DOMAIN_LIST):
+ module.params["share_user"] = ".\\{0}".format(module.params.get("share_user"))
+ share = {"share_ip": share_ip, "share_name": share_path_name, "share_type": "CIFS",
"username": module.params.get("share_user"), "password": module.params.get("share_password")}
if command == "export":
share["file_name"] = scp_file_name_format
@@ -482,20 +721,24 @@ def get_scp_share_details(module):
def export_scp_redfish(module, idrac):
command = module.params["command"]
share, scp_file_name_format = get_scp_share_details(module)
+ scp_components = ",".join(module.params["scp_components"])
+ include_in_export = IN_EXPORTS[module.params["include_in_export"]]
if share["share_type"] == "LOCAL":
scp_response = idrac.export_scp(export_format=module.params["export_format"],
export_use=module.params["export_use"],
- target=module.params["scp_components"],
+ target=scp_components, include_in_export=include_in_export,
job_wait=False, share=share, )
scp_response = wait_for_response(scp_response, module, share, idrac)
else:
scp_response = idrac.export_scp(export_format=module.params["export_format"],
export_use=module.params["export_use"],
- target=module.params["scp_components"],
- job_wait=module.params["job_wait"], share=share, )
+ target=scp_components, include_in_export=include_in_export,
+ job_wait=False, share=share, ) # Assigning it as false because job tracking is done in idrac_redfish.py as well.
+ scp_response = wait_for_job_tracking_redfish(
+ module, idrac, scp_response
+ )
scp_response = response_format_change(scp_response, module.params, scp_file_name_format)
- if isinstance(scp_response, dict) and scp_response.get("TaskStatus") == "Critical":
- module.fail_json(msg="Failed to {0} scp.".format(command), scp_status=scp_response)
+ exit_on_failure(module, scp_response, command)
return scp_response
@@ -516,136 +759,198 @@ def wait_for_response(scp_resp, module, share, idrac):
def preview_scp_redfish(module, idrac, http_share, import_job_wait=False):
+ import_buffer = module.params.get("import_buffer")
command = module.params["command"]
- scp_target = module.params["scp_components"]
+ scp_targets = 'ALL' # Assigning it as ALL because it is the only target for preview.
job_wait_option = module.params["job_wait"]
if command == "import":
job_wait_option = import_job_wait
- if http_share:
- share_url = urlparse(module.params["share_name"])
- share = {"share_ip": share_url.netloc, "share_name": share_url.path.strip('/'),
- "share_type": share_url.scheme.upper(), "file_name": module.params.get("scp_file"),
- "username": module.params.get("share_user"), "password": module.params.get("share_password")}
+ share = {}
+ if not import_buffer:
+ if http_share:
+ share_url = urlparse(module.params["share_name"])
+ share = {"share_ip": share_url.netloc, "share_name": share_url.path.strip('/'),
+ "share_type": share_url.scheme.upper(), "file_name": module.params.get("scp_file"),
+ "username": module.params.get("share_user"), "password": module.params.get("share_password")}
+ if http_share == "HTTPS":
+ share["ignore_certificate_warning"] = IGNORE_WARNING[module.params["ignore_certificate_warning"]]
+ else:
+ share, _scp_file_name_format = get_scp_share_details(module)
+ share["file_name"] = module.params.get("scp_file")
+ buffer_text = get_buffer_text(module, share)
+ scp_response = idrac.import_preview(import_buffer=buffer_text, target=scp_targets,
+ share=share, job_wait=False) # Assigning it as false because job tracking is done in idrac_redfish.py as well
+ scp_response = wait_for_job_tracking_redfish(
+ module, idrac, scp_response)
else:
- share, scp_file_name_format = get_scp_share_details(module)
- share["file_name"] = module.params.get("scp_file")
+ scp_response = idrac.import_preview(import_buffer=import_buffer, target=scp_targets, job_wait=job_wait_option)
+ scp_response = response_format_change(scp_response, module.params, share.get("file_name"))
+ exit_on_failure(module, scp_response, command)
+ return scp_response
+
+
+def exit_on_failure(module, scp_response, command):
+ if isinstance(scp_response, dict) and (scp_response.get("TaskStatus") == "Critical" or
+ scp_response.get("JobState") in ("Failed", "CompletedWithErrors")):
+ module.fail_json(msg=FAIL_MSG.format(command), scp_status=scp_response)
+
+
+def get_buffer_text(module, share):
buffer_text = None
if share["share_type"] == "LOCAL":
- scp_target = "ALL"
file_path = "{0}{1}{2}".format(share["share_name"], os.sep, share["file_name"])
if not exists(file_path):
module.fail_json(msg=INVALID_FILE)
with open(file_path, "r") as file_obj:
buffer_text = file_obj.read()
- scp_response = idrac.import_preview(import_buffer=buffer_text, target=scp_target,
- share=share, job_wait=job_wait_option)
- scp_response = response_format_change(scp_response, module.params, share["file_name"])
- if isinstance(scp_response, dict) and scp_response.get("TaskStatus") == "Critical":
- module.fail_json(msg="Failed to {0} scp.".format(command), scp_status=scp_response)
- return scp_response
+ return buffer_text
def import_scp_redfish(module, idrac, http_share):
+ import_buffer = module.params.get("import_buffer")
command = module.params["command"]
- scp_target = module.params["scp_components"]
- job_wait = copy.copy(module.params["job_wait"])
- if module.check_mode:
- module.params["job_wait"] = True
- scp_resp = preview_scp_redfish(module, idrac, http_share, import_job_wait=True)
- if "SYS081" in scp_resp["MessageId"] or "SYS082" in scp_resp["MessageId"]:
- module.exit_json(msg=CHANGES_FOUND, changed=True)
- else:
- module.fail_json(msg=scp_resp)
- if http_share:
- share_url = urlparse(module.params["share_name"])
- share = {"share_ip": share_url.netloc, "share_name": share_url.path.strip('/'),
- "share_type": share_url.scheme.upper(), "file_name": module.params.get("scp_file"),
- "username": module.params.get("share_user"), "password": module.params.get("share_password")}
- else:
- share, scp_file_name_format = get_scp_share_details(module)
+ scp_targets = ",".join(module.params["scp_components"])
+ perform_check_mode(module, idrac, http_share)
+ share = {}
+ if not import_buffer:
+ share, _scp_file_name_format = get_scp_share_details(module)
share["file_name"] = module.params.get("scp_file")
- buffer_text = None
- share_dict = share
- if share["share_type"] == "LOCAL":
- scp_target = "ALL"
- file_path = "{0}{1}{2}".format(share["share_name"], os.sep, share["file_name"])
- if not exists(file_path):
- module.fail_json(msg=INVALID_FILE)
- with open(file_path, "r") as file_obj:
- buffer_text = file_obj.read()
- share_dict = {}
- module.params["job_wait"] = job_wait
- scp_response = idrac.import_scp_share(shutdown_type=module.params["shutdown_type"],
- host_powerstate=module.params["end_host_power_state"],
- job_wait=module.params["job_wait"],
- target=scp_target,
- import_buffer=buffer_text, share=share_dict, )
- scp_response = response_format_change(scp_response, module.params, share["file_name"])
- if isinstance(scp_response, dict) and scp_response.get("TaskStatus") == "Critical":
- module.fail_json(msg="Failed to {0} scp.".format(command), scp_status=scp_response)
+ buffer_text = get_buffer_text(module, share)
+ share_dict = share
+ if share["share_type"] == "LOCAL":
+ share_dict = {}
+ idrac_import_scp_params = {
+ "import_buffer": buffer_text, "target": scp_targets, "share": share_dict, "job_wait": module.params["job_wait"],
+ "host_powerstate": module.params["end_host_power_state"], "shutdown_type": module.params["shutdown_type"]
+ }
+ scp_response = idrac.import_scp_share(**idrac_import_scp_params)
+ scp_response = wait_for_job_tracking_redfish(module, idrac, scp_response)
+ else:
+ scp_response = idrac.import_scp(import_buffer=import_buffer, target=scp_targets, job_wait=module.params["job_wait"])
+ scp_response = response_format_change(scp_response, module.params, share.get("file_name"))
+ exit_on_failure(module, scp_response, command)
return scp_response
-def main():
- specs = {
- "command": {"required": False, "type": 'str',
- "choices": ['export', 'import', 'preview'], "default": 'export'},
- "job_wait": {"required": True, "type": 'bool'},
- "share_name": {"required": True, "type": 'str'},
- "share_user": {"required": False, "type": 'str'},
- "share_password": {"required": False, "type": 'str',
- "aliases": ['share_pwd'], "no_log": True},
- "scp_components": {"required": False,
- "choices": ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID'],
- "default": 'ALL'},
- "scp_file": {"required": False, "type": 'str'},
- "shutdown_type": {"required": False,
- "choices": ['Graceful', 'Forced', 'NoReboot'],
- "default": 'Graceful'},
- "end_host_power_state": {"required": False,
- "choices": ['On', 'Off'],
- "default": 'On'},
- "export_format": {"required": False, "type": 'str',
- "choices": ['JSON', 'XML'], "default": 'XML'},
- "export_use": {"required": False, "type": 'str',
- "choices": ['Default', 'Clone', 'Replace'], "default": 'Default'}
+def wait_for_job_tracking_redfish(module, idrac, scp_response):
+ job_id = scp_response.headers["Location"].split("/")[-1]
+ if module.params["job_wait"]:
+ job_failed, _msg, job_dict, _wait_time = idrac_redfish_job_tracking(
+ idrac, iDRAC_JOB_URI.format(job_id=job_id))
+ if job_failed or job_dict.get("MessageId", "") in ERROR_CODES:
+ module.exit_json(failed=True, status_msg=job_dict, job_id=job_id, msg=FAIL_MSG.format(module.params["command"]))
+ scp_response = job_dict
+ return scp_response
+
+
+def validate_input(module, scp_components):
+ if len(scp_components) != 1 and "ALL" in scp_components:
+ module.fail_json(msg=SCP_ALL_ERR_MSG)
+ if module.params["command"] in ["import", "preview"]:
+ if module.params.get("import_buffer") is not None:
+ if module.params.get("scp_file") is not None:
+ module.fail_json(msg=MUTUALLY_EXCLUSIVE.format("scp_file"))
+ if module.params.get("share_name") is not None:
+ module.fail_json(msg=MUTUALLY_EXCLUSIVE.format("share_name"))
+
+
+def validate_scp_components(module, idrac):
+ components = idrac.invoke_request(REDFISH_SCP_BASE_URI, "GET")
+ all_components = strip_substr_dict(components.json_data)
+ scp_components = module.params.get("scp_components")
+ command = module.params.get("command")
+ oem = all_components['Actions']['Oem']
+ operation_dict = {
+ "export": "ExportSystemConfiguration",
+ "import": "ImportSystemConfiguration",
+ "preview": "ImportSystemConfigurationPreview"
}
+ for each in oem:
+ if each.endswith(operation_dict.get(command.lower())):
+ allowable = oem.get(each).get('ShareParameters').get('Target@Redfish.AllowableValues')
+ invalid_comp = list(set(scp_components) - set(allowable))
+ if invalid_comp:
+ msg = TARGET_INVALID_MSG.format(command=command, invalid_targets=invalid_comp, valid_targets=allowable)
+ module.exit_json(msg=msg, failed=True)
+
+
+class ImportCommand():
+ def __init__(self, idrac, http_share, module):
+ self.idrac = idrac
+ self.http_share = http_share
+ self.module = module
+
+ def execute(self):
+ changed = False
+ if self.http_share:
+ scp_status = run_export_import_scp_http(self.idrac, self.module)
+ if "SYS069" in scp_status.get("MessageId", ""):
+ changed = False
+ elif "SYS053" in scp_status.get("MessageId", ""):
+ changed = True
+ else:
+ scp_status = import_scp_redfish(self.module, self.idrac, self.http_share)
+ if "No changes were applied" not in scp_status.get('Message', ""):
+ changed = True
+ elif "SYS043" in scp_status.get("MessageId", ""):
+ changed = True
+ elif "SYS069" in scp_status.get("MessageId", ""):
+ changed = False
+ return scp_status, changed
+
+
+class ExportCommand():
+ def __init__(self, idrac, http_share, module):
+ self.idrac = idrac
+ self.http_share = http_share
+ self.module = module
+
+ def execute(self):
+ if self.http_share:
+ scp_status = run_export_import_scp_http(self.idrac, self.module)
+ else:
+ scp_status = export_scp_redfish(self.module, self.idrac)
+ return scp_status, False
+
+
+class PreviewCommand():
+ def __init__(self, idrac, http_share, module):
+ self.idrac = idrac
+ self.http_share = http_share
+ self.module = module
+
+ def execute(self):
+ scp_status = preview_scp_redfish(self.module, self.idrac, self.http_share, import_job_wait=False)
+ return scp_status, False
+
+
+def main():
+ specs = get_argument_spec()
specs.update(idrac_auth_params)
module = AnsibleModule(
argument_spec=specs,
required_if=[
- ["command", "import", ["scp_file"]],
- ["command", "preview", ["scp_file"]],
+ ["command", "export", ["share_name"]],
+ ["proxy_support", True, ["proxy_server"]]
],
supports_check_mode=True)
+ validate_input(module, module.params.get("scp_components"))
try:
- changed = False
- http_share = module.params["share_name"].lower().startswith(('http://', 'https://'))
+ http_share = False
+ if module.params.get("share_name") is not None:
+ http_share = module.params["share_name"].lower().startswith(('http://', 'https://'))
with iDRACRedfishAPI(module.params) as idrac:
+ validate_scp_components(module, idrac)
command = module.params['command']
if command == 'import':
- if http_share:
- scp_status = run_export_import_scp_http(idrac, module)
- if "SYS069" in scp_status.get("MessageId", ""):
- changed = False
- elif "SYS053" in scp_status.get("MessageId", ""):
- changed = True
- else:
- scp_status = import_scp_redfish(module, idrac, http_share)
- if "No changes were applied" not in scp_status.get('Message', ""):
- changed = True
- elif "SYS043" in scp_status.get("MessageId", ""):
- changed = True
- elif "SYS069" in scp_status.get("MessageId", ""):
- changed = False
- elif command == "export":
- if http_share:
- scp_status = run_export_import_scp_http(idrac, module)
- else:
- scp_status = export_scp_redfish(module, idrac)
+ command_obj = ImportCommand(idrac, http_share, module)
+ elif command == 'export':
+ command_obj = ExportCommand(idrac, http_share, module)
else:
- scp_status = preview_scp_redfish(module, idrac, http_share, import_job_wait=False)
+ command_obj = PreviewCommand(idrac, http_share, module)
+ scp_status, changed = command_obj.execute()
+
if module.params.get('job_wait'):
scp_status = strip_substr_dict(scp_status)
msg = "Successfully {0}ed the Server Configuration Profile."
@@ -654,7 +959,7 @@ def main():
msg = "Successfully triggered the job to {0} the Server Configuration Profile."
module.exit_json(msg=msg.format(command), scp_status=scp_status)
except HTTPError as err:
- module.fail_json(msg=str(err), error_info=json.load(err))
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
except (ImportError, ValueError, RuntimeError, SSLValidationError,
@@ -662,5 +967,42 @@ def main():
module.fail_json(msg=str(e))
+def get_argument_spec():
+ return {
+ "command": {"required": False, "type": 'str',
+ "choices": ['export', 'import', 'preview'], "default": 'export'},
+ "job_wait": {"required": True, "type": 'bool'},
+ "share_name": {"required": False, "type": 'str'},
+ "share_user": {"required": False, "type": 'str'},
+ "share_password": {"required": False, "type": 'str',
+ "aliases": ['share_pwd'], "no_log": True},
+ "scp_components": {"type": "list", "required": False, "elements": "str",
+ "choices": ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID', 'FC', 'InfiniBand', 'SupportAssist',
+ 'EventFilters', 'System', 'LifecycleController', 'AHCI', 'PCIeSSD'],
+ "default": ['ALL'], "aliases": ["target"]},
+ "scp_file": {"required": False, "type": 'str'},
+ "shutdown_type": {"required": False,
+ "choices": ['Graceful', 'Forced', 'NoReboot'],
+ "default": 'Graceful'},
+ "end_host_power_state": {"required": False,
+ "choices": ['On', 'Off'],
+ "default": 'On'},
+ "export_format": {"required": False, "type": 'str',
+ "choices": ['JSON', 'XML'], "default": 'XML'},
+ "export_use": {"required": False, "type": 'str',
+ "choices": ['Default', 'Clone', 'Replace'], "default": 'Default'},
+ "ignore_certificate_warning": {"required": False, "choices": ["ignore", "showerror"], "default": "ignore"},
+ "include_in_export": {"required": False, "type": "str", "default": "default",
+ "choices": ["default", "readonly", "passwordhashvalues", "customtelemetry"]},
+ "import_buffer": {"type": "str", "required": False},
+ "proxy_support": {"type": "bool", "required": False, "default": False},
+ "proxy_type": {"type": "str", "required": False, "choices": ["http", "socks4"], "default": "http"},
+ "proxy_server": {"type": "str", "required": False},
+ "proxy_port": {"type": "str", "required": False, "default": "80"},
+ "proxy_username": {"type": "str", "required": False},
+ "proxy_password": {"type": "str", "required": False, "no_log": True},
+ }
+
+
if __name__ == '__main__':
main()
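
A minimal usage sketch of the command-object refactor above, not part of the patch: the three command classes share an execute() method that returns the SCP status payload and a changed flag, so main() only has to pick the class. The dict-based dispatch and the helper name run_scp_command below are illustrative; the module itself uses an if/elif chain.

    COMMAND_CLASSES = {"import": ImportCommand, "export": ExportCommand, "preview": PreviewCommand}

    def run_scp_command(idrac, http_share, module):
        # 'command' is restricted to the three choices by get_argument_spec(),
        # so direct indexing is safe here.
        command_cls = COMMAND_CLASSES[module.params["command"]]
        scp_status, changed = command_cls(idrac, http_share, module).execute()
        return scp_status, changed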
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_syslog.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_syslog.py
index d078b0851..562ccc1ff 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_syslog.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_syslog.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -31,13 +31,14 @@ options:
default: Enabled
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Anooja Vardhineni (@anooja-vardhineni)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -45,27 +46,27 @@ EXAMPLES = """
---
- name: Enable iDRAC syslog
dellemc.openmanage.idrac_syslog:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
- share_name: "192.168.0.2:/share"
- share_password: "share_user_pwd"
- share_user: "share_user_name"
- share_mnt: "/mnt/share"
- syslog: "Enabled"
+ share_name: "192.168.0.2:/share"
+ share_password: "share_user_pwd"
+ share_user: "share_user_name"
+ share_mnt: "/mnt/share"
+ syslog: "Enabled"
- name: Disable iDRAC syslog
dellemc.openmanage.idrac_syslog:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
- share_name: "192.168.0.2:/share"
- share_password: "share_user_pwd"
- share_user: "share_user_name"
- share_mnt: "/mnt/share"
- syslog: "Disabled"
+ share_name: "192.168.0.2:/share"
+ share_password: "share_user_pwd"
+ share_user: "share_user_name"
+ share_mnt: "/mnt/share"
+ syslog: "Disabled"
"""
RETURN = r'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_system_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_system_info.py
index 61827f2df..21dbb105f 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_system_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_system_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -25,10 +25,11 @@ extends_documentation_fragment:
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author: "Rajeev Arakkal (@rajeevarakkal)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py
index 6227571c0..82864340b 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -70,13 +70,14 @@ options:
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Anooja Vardhineni (@anooja-vardhineni)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -84,9 +85,9 @@ EXAMPLES = """
---
- name: Configure time zone and NTP on iDRAC
dellemc.openmanage.idrac_timezone_ntp:
- idrac_ip: "190.168.0.1"
+ idrac_ip: "190.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
setup_idrac_timezone: "UTC"
enable_ntp: Enabled
@@ -158,7 +159,6 @@ import json
try:
from omdrivers.enums.iDRAC.iDRAC import NTPEnable_NTPConfigGroupTypes
from omsdk.sdkfile import file_share_manager
- from omsdk.sdkcreds import UserCredentials
except ImportError:
pass
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user.py
index df9f9adbe..bcd16b872 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2018-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -31,13 +31,11 @@ options:
description:
- Select C(present) to create or modify a user account.
- Select C(absent) to remove a user account.
- - Ensure Lifecycle Controller is available because the user operation
- uses the capabilities of Lifecycle Controller.
choices: [present, absent]
default: present
user_name:
type: str
- required: True
+ required: true
description: Provide the I(user_name) of the account to be created, deleted or modified.
user_password:
type: str
@@ -59,7 +57,13 @@ options:
access virtual console, access virtual media, and execute debug commands.
- A user with C(ReadOnly) privilege can only log in to iDRAC.
- A user with C(None), no privileges assigned.
+ - This option is ignored if I(custom_privilege) is provided.
choices: [Administrator, ReadOnly, Operator, None]
+ custom_privilege:
+ type: int
+ description:
+ - The custom privilege level assigned to the user.
+ - The value must be in the range 0 to 511.
+ version_added: "8.1.0"
ipmi_lan_privilege:
type: str
description: The Intelligent Platform Management Interface LAN privilege level assigned to the user.
@@ -100,7 +104,7 @@ requirements:
- "python >= 3.8.6"
author: "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
- This module supports C(check_mode).
"""
@@ -211,8 +215,11 @@ from ansible.module_utils.basic import AnsibleModule
ACCOUNT_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/"
ATTRIBUTE_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Attributes/"
-PRIVILEGE = {"Administrator": 511, "Operator": 499, "ReadOnly": 1, "None": 0}
+USER_ROLES = {"Administrator": 511, "Operator": 499, "ReadOnly": 1, "None": 0}
ACCESS = {0: "Disabled", 1: "Enabled"}
+INVALID_PRIVILAGE_MSG = "custom_privilege value should be from 0 to 511."
+INVALID_PRIVILAGE_MIN = 0
+INVALID_PRIVILAGE_MAX = 511
def compare_payload(json_payload, idrac_attr):
@@ -270,10 +277,13 @@ def get_payload(module, slot_id, action=None):
:param slot_id: slot id for user slot
:return: json data with slot id
"""
+ user_privilege = module.params["custom_privilege"] if "custom_privilege" in module.params and \
+ module.params["custom_privilege"] is not None else USER_ROLES.get(module.params["privilege"])
+
slot_payload = {"Users.{0}.UserName": module.params["user_name"],
"Users.{0}.Password": module.params["user_password"],
"Users.{0}.Enable": ACCESS.get(module.params["enable"]),
- "Users.{0}.Privilege": PRIVILEGE.get(module.params["privilege"]),
+ "Users.{0}.Privilege": user_privilege,
"Users.{0}.IpmiLanPrivilege": module.params["ipmi_lan_privilege"],
"Users.{0}.IpmiSerialPrivilege": module.params["ipmi_serial_privilege"],
"Users.{0}.SolEnable": ACCESS.get(module.params["sol_enable"]),
@@ -378,6 +388,14 @@ def remove_user_account(module, idrac, slot_uri, slot_id):
return response, msg
+def validate_input(module):
+ if module.params["state"] == "present":
+ user_privilege = module.params["custom_privilege"] if "custom_privilege" in module.params and \
+ module.params["custom_privilege"] is not None else USER_ROLES.get(module.params["privilege"], 0)
+ if INVALID_PRIVILAGE_MIN > user_privilege or user_privilege > INVALID_PRIVILAGE_MAX:
+ module.fail_json(msg=INVALID_PRIVILAGE_MSG)
+
+
def main():
specs = {
"state": {"required": False, "choices": ['present', 'absent'], "default": "present"},
@@ -385,6 +403,7 @@ def main():
"user_name": {"required": True},
"user_password": {"required": False, "no_log": True},
"privilege": {"required": False, "choices": ['Administrator', 'ReadOnly', 'Operator', 'None']},
+ "custom_privilege": {"required": False, "type": "int"},
"ipmi_lan_privilege": {"required": False, "choices": ['Administrator', 'Operator', 'User', 'No Access']},
"ipmi_serial_privilege": {"required": False, "choices": ['Administrator', 'Operator', 'User', 'No Access']},
"enable": {"required": False, "type": "bool"},
@@ -398,6 +417,7 @@ def main():
argument_spec=specs,
supports_check_mode=True)
try:
+ validate_input(module)
with iDRACRedfishAPI(module.params, req_session=True) as idrac:
user_attr, slot_uri, slot_id, empty_slot_id, empty_slot_uri = get_user_account(module, idrac)
if module.params["state"] == "present":
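
A self-contained sketch of how the value written to Users.N.Privilege is resolved after this change; the USER_ROLES map and the 0 to 511 bounds come from the additions above, while the helper name resolve_privilege is illustrative only.

    USER_ROLES = {"Administrator": 511, "Operator": 499, "ReadOnly": 1, "None": 0}

    def resolve_privilege(privilege=None, custom_privilege=None):
        # custom_privilege, when supplied, takes precedence over the named role.
        value = custom_privilege if custom_privilege is not None else USER_ROLES.get(privilege, 0)
        if not 0 <= value <= 511:
            raise ValueError("custom_privilege value should be from 0 to 511.")
        return value

    # resolve_privilege(privilege="Operator")  -> 499
    # resolve_privilege(custom_privilege=493)  -> 493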
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user_info.py
new file mode 100644
index 000000000..6d06a60be
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user_info.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_user_info
+short_description: Retrieve details of all users or a specific user on iDRAC.
+version_added: "7.0.0"
+description:
+ - "This module retrieves the list and basic details of all users or details of a specific user on
+ iDRAC."
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ user_id:
+ description:
+ - Sequential user ID numbers from 1 to 16 are supported.
+ - I(user_id) is mutually exclusive with I(username).
+ type: int
+ username:
+ type: str
+ description:
+ - Username of the account that is created in iDRAC local users.
+ - I(username) is mutually exclusive with I(user_id).
+requirements:
+ - "python >= 3.8.6"
+author: "Husniya Hameed(@husniya_hameed)"
+notes:
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Retrieve basic details of all user accounts.
+ dellemc.openmanage.idrac_user_info:
+ idrac_ip: 198.162.0.1
+ idrac_user: idrac_user
+ idrac_password: idrac_password
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Retrieve user details using user_id
+ dellemc.openmanage.idrac_user_info:
+ idrac_ip: 198.162.0.1
+ idrac_user: idrac_user
+ idrac_password: idrac_password
+ ca_path: "/path/to/ca_cert.pem"
+ user_id: 1
+
+- name: Retrieve user details using username
+ dellemc.openmanage.idrac_user_info:
+ idrac_ip: 198.162.0.1
+ idrac_user: idrac_user
+ idrac_password: idrac_password
+ ca_path: "/path/to/ca_cert.pem"
+ username: user_name
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Status of user information retrieval.
+ returned: always
+ type: str
+ sample: "Successfully retrieved the user information."
+user_info:
+ description: Information about the user.
+ returned: success
+ type: list
+ sample: [{
+ "Description": "User Account",
+ "Enabled": false,
+ "Id": "1",
+ "Locked": false,
+ "Name": "User Account",
+ "Password": null,
+ "RoleId": "None",
+ "UserName": ""
+ }]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+
+import json
+from ssl import SSLError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict
+
+
+ACCOUNT = "/redfish/v1"
+SUCCESS_MSG = "Successfully retrieved the information of {0} user(s)."
+UNSUCCESS_MSG = "Unable to retrieve the user information."
+INVALID_USERID = "'user_id' is not valid."
+INVALID_USERNAME = "'username' is not valid."
+SUCCESSFUL_MSG = "Successfully retrieved the user information."
+
+
+def get_accounts_uri(idrac):
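+ # Discover the Accounts collection by walking the service root; if the walk
+ # raises an HTTPError, fall back to the standard Redfish accounts path.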
+ try:
+ account_path = idrac.invoke_request(ACCOUNT, 'GET')
+ account_service = account_path.json_data.get("AccountService").get("@odata.id")
+ accounts = idrac.invoke_request(account_service, "GET")
+ accounts_uri = accounts.json_data.get("Accounts").get("@odata.id")
+ except HTTPError:
+ accounts_uri = "/redfish/v1/AccountService/Accounts"
+ return accounts_uri
+
+
+def fetch_all_accounts(idrac, accounts_uri):
+ all_accounts = idrac.invoke_request("{0}?$expand=*($levels=1)".format(accounts_uri), 'GET')
+ all_accs = all_accounts.json_data.get("Members")
+ return all_accs
+
+
+def get_user_id_accounts(idrac, module, accounts_uri, user_id):
+ acc_dets_json_data = {}
+ try:
+ acc_uri = accounts_uri + "/{0}".format(user_id)
+ acc_dets = idrac.invoke_request(acc_uri, "GET")
+ acc_dets_json_data = strip_substr_dict(acc_dets.json_data)
+ if acc_dets_json_data.get("Oem") is not None:
+ acc_dets_json_data["Oem"]["Dell"] = strip_substr_dict(acc_dets_json_data["Oem"]["Dell"])
+ acc_dets_json_data.pop("Links", None)
+ except HTTPError:
+ module.exit_json(msg=INVALID_USERID, failed=True)
+ return acc_dets_json_data
+
+
+def get_user_name_accounts(idrac, module, accounts_uri, user_name):
+ all_accs = fetch_all_accounts(idrac, accounts_uri)
+ acc_dets_json_data = {}
+ for acc in all_accs:
+ if acc.get("UserName") == user_name:
+ acc.pop("Links", None)
+ acc_dets_json_data = strip_substr_dict(acc)
+ if acc_dets_json_data.get("Oem") is not None:
+ acc_dets_json_data["Oem"]["Dell"] = strip_substr_dict(acc_dets_json_data["Oem"]["Dell"])
+ break
+ if not bool(acc_dets_json_data):
+ module.fail_json(msg=INVALID_USERNAME, failed=True)
+ return acc_dets_json_data
+
+
+def get_all_accounts(idrac, account_uri):
+ all_accs = fetch_all_accounts(idrac, account_uri)
+ idrac_list = []
+ for acc in all_accs:
+ if acc.get("UserName") != "":
+ acc.pop("Links", None)
+ acc_dets_json_data = strip_substr_dict(acc)
+ if acc_dets_json_data.get("Oem") is not None:
+ acc_dets_json_data["Oem"]["Dell"] = strip_substr_dict(acc_dets_json_data["Oem"]["Dell"])
+ idrac_list.append(acc_dets_json_data)
+ return idrac_list
+
+
+def main():
+ specs = {
+ "user_id": {"type": 'int'},
+ "username": {"type": 'str'}
+ }
+ specs.update(idrac_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ mutually_exclusive=[
+ ('user_id', 'username')
+ ],
+ supports_check_mode=True
+ )
+ try:
+ with iDRACRedfishAPI(module.params, req_session=True) as idrac:
+ resp = []
+ msg = SUCCESSFUL_MSG
+ accounts_uri = get_accounts_uri(idrac)
+ user_id = module.params.get("user_id")
+ user_name = module.params.get("username")
+ if user_id is not None:
+ resp.append(get_user_id_accounts(idrac, module, accounts_uri, user_id))
+ elif user_name is not None:
+ resp.append(get_user_name_accounts(idrac, module, accounts_uri, user_name))
+ else:
+ resp.extend(get_all_accounts(idrac, accounts_uri))
+ resp_len = len(resp)
+ msg = SUCCESS_MSG.format(resp_len)
+ if resp:
+ module.exit_json(msg=msg, user_info=resp)
+ else:
+ module.fail_json(msg=UNSUCCESS_MSG, failed=True)
+ except HTTPError as err:
+ module.fail_json(msg=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError, SSLError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py
index ac22541eb..4c5fb10db 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py
@@ -33,22 +33,22 @@ options:
required: true
type: bool
description:
- - C(True) connects the remote image file.
- - C(False) ejects the remote image file if connected.
+ - C(true) connects the remote image file.
+ - C(false) ejects the remote image file if connected.
image:
type: path
description:
- The path of the image file. The supported file types are .img and .iso.
- The file name with .img extension is redirected as a virtual floppy and a file name with .iso extension is
redirected as a virtual CDROM.
- - This option is required when I(insert) is C(True).
+ - This option is required when I(insert) is C(true).
- "The following are the examples of the share location:
CIFS share: //192.168.0.1/file_path/image_name.iso,
NFS share: 192.168.0.2:/file_path/image_name.img,
HTTP share: http://192.168.0.3/file_path/image_name.iso,
HTTPS share: https://192.168.0.4/file_path/image_name.img"
- - CIFS share is not supported by iDRAC7 and iDRAC8.
- - HTTPS share with credentials is not supported by iDRAC7 and iDRAC8.
+ - CIFS share is not supported by iDRAC8.
+ - HTTPS share with credentials is not supported by iDRAC8.
index:
type: int
description:
@@ -67,12 +67,12 @@ options:
- This module always reports as the changes found when I(password) is provided.
media_type:
type: str
- description: Type of the image file. This is applicable when I(insert) is C(True).
+ description: Type of the image file. This is applicable when I(insert) is C(true).
choices: [CD, DVD, USBStick]
force:
type: bool
- description: C(True) ejects the image file if already connected and inserts the file provided in I(image).
- This is applicable when I(insert) is C(True).
+ description: C(true) ejects the image file if already connected and inserts the file provided in I(image).
+ This is applicable when I(insert) is C(true).
default: false
resource_id:
type: str
@@ -162,7 +162,7 @@ EXAMPLES = """
ca_path: "/path/to/ca_cert.pem"
force: true
virtual_media:
- insert: false
+ insert: false
- name: Insertion and ejection of image file in single task.
dellemc.openmanage.idrac_virtual_media:
@@ -313,7 +313,7 @@ def _validate_params(module, vr_members, rd_version):
def virtual_media_operation(idrac, module, payload, vr_id):
- err_payload, inserted = [], []
+ err_payload = []
force = module.params["force"]
for i in payload:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py
index 98235b9d3..6f420bec7 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -92,34 +92,34 @@ options:
- Enables testing the connection to the domain controller.
- The connection to the domain controller is tested with the provided Active Directory service details.
- If test fails, module will error out.
- - If C(yes), I(domain_username) and I(domain_password) has to be provided.
- default: no
+ - If C(true), I(domain_username) and I(domain_password) has to be provided.
+ default: false
domain_password:
type: str
description:
- Provide the domain password.
- - This is applicable when I(test_connection) is C(yes).
+ - This is applicable when I(test_connection) is C(true).
domain_username:
type: str
description:
- Provide the domain username either in the UPN (username@domain) or NetBIOS (domain\\\\username) format.
- - This is applicable when I(test_connection) is C(yes).
+ - This is applicable when I(test_connection) is C(true).
validate_certificate:
type: bool
description:
- Enables validation of SSL certificate of the domain controller.
- - The module will always report change when this is C(yes).
- default: no
+ - The module will always report change when this is C(true).
+ default: false
certificate_file:
type: path
description:
- Provide the full path of the SSL certificate.
- The certificate should be a Root CA Certificate encoded in Base64 format.
- - This is applicable when I(validate_certificate) is C(yes).
+ - This is applicable when I(validate_certificate) is C(true).
requirements:
- "python >= 3.8.6"
notes:
- - The module will always report change when I(validate_certificate) is C(yes).
+ - The module will always report change when I(validate_certificate) is C(true).
- Run this module from a system that has direct access to OpenManage Enterprise.
- This module supports C(check_mode).
"""
@@ -136,7 +136,7 @@ EXAMPLES = """
domain_server:
- domainname.com
group_domain: domainname.com
- test_connection: yes
+ test_connection: true
domain_username: user@domainname
domain_password: domain_password
@@ -151,7 +151,7 @@ EXAMPLES = """
domain_server:
- 192.68.20.181
group_domain: domainname.com
- validate_certificate: yes
+ validate_certificate: true
certificate_file: "/path/to/certificate/file.cer"
- name: Modify domain controller IP address, network_timeout and group_domain
@@ -183,10 +183,10 @@ EXAMPLES = """
password: "password"
ca_path: "/path/to/ca_cert.pem"
name: my_ad2
- test_connection: yes
+ test_connection: true
domain_username: user@domainname
domain_password: domain_password
- validate_certificate: yes
+ validate_certificate: true
certificate_file: "/path/to/certificate/file.cer"
"""
@@ -397,7 +397,7 @@ def delete_ad(module, rest_obj, ad):
ad = rest_obj.strip_substr_dict(ad)
if module.check_mode:
module.exit_json(msg=CHANGES_FOUND, active_directory=ad, changed=True)
- resp = rest_obj.invoke_request('POST', DELETE_AD, data={"AccountProviderIds": [int(ad['Id'])]})
+ rest_obj.invoke_request('POST', DELETE_AD, data={"AccountProviderIds": [int(ad['Id'])]})
module.exit_json(msg=DELETE_SUCCESS, active_directory=ad, changed=True)
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies.py
new file mode 100644
index 000000000..9e8a17fd2
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies.py
@@ -0,0 +1,1114 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.3.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_alert_policies
+short_description: Manage OME alert policies.
+version_added: "8.3.0"
+description: This module allows you to create, modify, or delete alert policies on OpenManage Enterprise or OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ name:
+ description:
+ - Name of an alert policy or a list of alert policies.
+ - More than one policy name is applicable when I(state) is C(absent), or when I(state) is C(present) with only I(enable) provided.
+ type: list
+ elements: str
+ required: true
+ state:
+ description:
+ - C(present) allows you to create an alert policy or update if the policy name already exists.
+ - C(absent) allows you to delete an alert policy.
+ default: present
+ choices: [present, absent]
+ type: str
+ enable:
+ description:
+ - C(true) allows you to enable an alert policy.
+ - C(false) allows you to disable an alert policy.
+ - This is applicable only when I(state) is C(present).
+ type: bool
+ new_name:
+ description:
+ - New name for the alert policy.
+ - This is applicable only when I(state) is C(present), and an alert policy exists.
+ type: str
+ description:
+ description:
+ - Description for the alert policy.
+ - This is applicable only when I(state) is C(present).
+ type: str
+ device_service_tag:
+ description:
+ - List of device service tags on which the alert policy will be applicable.
+ - This option is mutually exclusive with I(device_group), I(specific_undiscovered_devices), I(any_undiscovered_devices) and I(all_devices).
+ - This is applicable only when I(state) is C(present).
+ type: list
+ elements: str
+ device_group:
+ description:
+ - List of device group names on which the alert policy is applicable.
+ - This option is mutually exclusive with I(device_service_tag), I(specific_undiscovered_devices), I(any_undiscovered_devices) and I(all_devices).
+ - This is applicable only when I(state) is C(present).
+ type: list
+ elements: str
+ specific_undiscovered_devices:
+ description:
+ - List of undiscovered IPs, hostnames, or range of IPs of devices on which the alert policy is applicable.
+ - This option is mutually exclusive with I(device_service_tag), I(device_group), I(any_undiscovered_devices) and I(all_devices).
+ - This is applicable only when I(state) is C(present).
+ - "Examples of valid IP range format:"
+ - " 10.35.0.0"
+ - " 10.36.0.0-10.36.0.255"
+ - " 10.37.0.0/24"
+ - " 2607:f2b1:f083:135::5500/118"
+ - " 2607:f2b1:f083:135::a500-2607:f2b1:f083:135::a600"
+ - " hostname.domain.com"
+ - "Examples of invalid IP range format:"
+ - " 10.35.0.*"
+ - " 10.36.0.0-255"
+ - " 10.35.0.0/255.255.255.0"
+ - These values will not be validated.
+ type: list
+ elements: str
+ any_undiscovered_devices:
+ description:
+ - This option indicates whether the alert policy is applicable to any undiscovered devices or not.
+ - This option is mutually exclusive with I(device_service_tag), I(specific_undiscovered_devices), I(device_group) and I(all_devices).
+ - This is applicable only when I(state) is C(present).
+ type: bool
+ all_devices:
+ description:
+ - This option indicates whether the alert policy is applicable to all the discovered and undiscovered devices or not.
+ - This option is mutually exclusive with I(device_service_tag), I(specific_undiscovered_devices), I(any_undiscovered_devices) and I(device_group).
+ - This is applicable only when I(state) is C(present).
+ type: bool
+ category:
+ description:
+ - Category of the alerts received.
+ - This option is mutually exclusive with I(message_ids) and I(message_file).
+ - This is fetched from the M(dellemc.openmanage.ome_alert_policies_category_info).
+ - This is applicable only when I(state) is C(present).
+ type: list
+ elements: dict
+ suboptions:
+ catalog_name:
+ description: Name of the catalog.
+ type: str
+ required: true
+ catalog_category:
+ description: Category of the catalog.
+ type: list
+ elements: dict
+ suboptions:
+ category_name:
+ description: Name of the category.
+ type: str
+ sub_category_names:
+ description: List of sub-categories.
+ type: list
+ elements: str
+ message_ids:
+ description:
+ - List of message IDs.
+ - This option is mutually exclusive with I(category) and I(message_file).
+ - This is applicable only when I(state) is C(present).
+ - This is fetched from the M(dellemc.openmanage.ome_alert_policies_message_id_info).
+ type: list
+ elements: str
+ message_file:
+ description:
+ - Local path of a CSV-formatted file with message IDs.
+ - This option is mutually exclusive with I(category) and I(message_ids).
+ - This is applicable only when I(state) is C(present).
+ - This is fetched from the M(dellemc.openmanage.ome_alert_policies_message_id_info).
+ type: path
+ date_and_time:
+ description:
+ - Specifies the schedule for when the alert policy is applicable.
+ - I(date_and_time) is mandatory for creating a policy and optional when updating a policy.
+ - This is applicable only when I(state) is C(present).
+ type: dict
+ suboptions:
+ date_from:
+ description:
+ - "Start date in the format YYYY-MM-DD."
+ - This parameter must be provided in quotes.
+ type: str
+ required: true
+ date_to:
+ description:
+ - "End date in the format YYYY-MM-DD."
+ - This parameter must be provided in quotes.
+ type: str
+ time_from:
+ description:
+ - "Interval start time in the format HH:MM"
+ - This parameter must be provided in quotes.
+ - This is mandatory when I(time_interval) is C(true).
+ type: str
+ time_to:
+ description:
+ - "Interval end time in the format HH:MM"
+ - This parameter must be provided in quotes.
+ - This is mandatory when I(time_interval) is C(true).
+ type: str
+ days:
+ description: Required days of the week on which alert policy operation must be scheduled.
+ type: list
+ elements: str
+ choices: [monday, tuesday, wednesday, thursday, friday, saturday, sunday]
+ time_interval:
+ description: Enable the time interval for which alert policy must be scheduled.
+ type: bool
+ severity:
+ description:
+ - Severity of the alert policy.
+ - This is mandatory for creating a policy and optional for updating a policy.
+ - This is applicable only when I(state) is C(present).
+ type: list
+ elements: str
+ choices: [all, unknown, info, normal, warning, critical]
+ actions:
+ description:
+ - Actions to be triggered for the alert policy.
+ - This parameter is case-sensitive.
+ - This is mandatory for creating a policy and optional for updating a policy.
+ - This is applicable only when I(state) is C(present).
+ type: list
+ elements: dict
+ suboptions:
+ action_name:
+ description:
+ - Name of the action.
+ - This is fetched from the M(dellemc.openmanage.ome_alert_policies_action_info).
+ - This is mandatory for creating a policy and optional for updating a policy.
+ - This parameter is case-sensitive.
+ type: str
+ required: true
+ parameters:
+ description:
+ - Predefined parameters required to set for I(action_name).
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ name:
+ description:
+ - Name of the predefined parameter.
+ - This is fetched from the M(dellemc.openmanage.ome_alert_policies_action_info).
+ type: str
+ value:
+ description:
+ - Value of the predefined parameter.
+ - These values will not be validated.
+ type: str
+requirements:
+ - "python >= 3.9.6"
+author: "Jagadeesh N V(@jagadeeshnv)"
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise
+ or OpenManage Enterprise Modular.
+ - This module supports IPv4 and IPv6 addresses.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: "Create an alert policy"
+ dellemc.openmanage.ome_alert_policies:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Alert Policy One"
+ device_service_tag:
+ - ABCD123
+ - SVC7845
+ category:
+ - catalog_name: Application
+ catalog_category:
+ - category_name: Audit
+ sub_category_names:
+ - Generic
+ - Devices
+ - catalog_name: iDRAC
+ catalog_category:
+ - category_name: Audit
+ sub_category_names:
+ - BIOS Management
+ - iDRAC Service Module
+ date_and_time:
+ date_from: "2023-10-10"
+ date_to: "2023-10-11"
+ time_from: "11:00"
+ time_to: "12:00"
+ severity:
+ - unknown
+ - critical
+ actions:
+ - action_name: Trap
+ parameters:
+ - name: "192.1.2.3:162"
+ value: true
+ - name: "traphostname.domain.com:162"
+ value: true
+ tags: create_alert_policy
+
+- name: "Update an alert Policy"
+ dellemc.openamanage.ome_alert_policies:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ new_name: "Update Policy Name"
+ device_group: "Group Name"
+ message_ids:
+ - AMP400
+ - CTL201
+ - BIOS101
+ date_and_time:
+ date_from: "2023-10-10"
+ date_to: "2023-10-11"
+ time_from: "11:00"
+ time_to: "12:00"
+ time_interval: true
+ actions:
+ - action_name: Trap
+ parameters:
+ - name: "192.1.2.3:162"
+ value: true
+ tags: update_alert_policy
+
+- name: "Enable an alert policy"
+ dellemc.openmanage.ome_alert_policies:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Policy Name"
+ enable: true
+ tags: enable_alert_policy
+
+- name: "Disable multiple alert policies"
+ dellemc.openmanage.ome_alert_policies:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name:
+ - "Policy Name 1"
+ - "Policy Name 2"
+ enable: false
+ tags: disable_alert_policy
+
+- name: "Delete an alert policy"
+ dellemc.openmanage.ome_alert_policies:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name:
+ - "Policy Name"
+ state: absent
+ tags: delete_alert_policy
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Status of the alert policies operation.
+ returned: always
+ sample: "Successfully created the alert policy."
+status:
+ type: dict
+ description: The policy which was created or modified.
+ returned: when state is present
+ sample: {
+ "Id": 12345,
+ "Name": "Policy",
+ "Description": "Details of the Policy",
+ "Enabled": true,
+ "DefaultPolicy": false,
+ "Editable": true,
+ "Visible": true,
+ "PolicyData": {
+ "Catalogs": [
+ {
+ "CatalogName": "iDRAC",
+ "Categories": [
+ 4
+ ],
+ "SubCategories": [
+ 41
+ ]
+ },
+ {
+ "CatalogName": "Application",
+ "Categories": [
+ 0
+ ],
+ "SubCategories": [
+ 0
+ ]
+ }
+ ],
+ "Severities": [
+ 16,
+ 1,
+ 2,
+ 4,
+ 8
+ ],
+ "Devices": [
+ 10086,
+ 10088
+ ],
+ "DeviceTypes": [
+ 1000,
+ 2000
+ ],
+ "Groups": [],
+ "Schedule": {
+ "StartTime": "2023-06-06 15:02:46.000",
+ "EndTime": "2023-06-06 18:02:46.000",
+ "CronString": "* * * ? * * *"
+ },
+ "Actions": [
+ {
+ "Id": 8,
+ "Name": "Email",
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "subject",
+ "Value": "Device Name: $name, Device IP Address: $ip, Severity: $severity",
+ "Type": "string",
+ "TypeParams": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ },
+ {
+ "Id": 1,
+ "Name": "to",
+ "Value": "test@org.com",
+ "Type": "string",
+ "TypeParams": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ },
+ {
+ "Id": 1,
+ "Name": "from",
+ "Value": "abc@corp.com",
+ "Type": "string",
+ "TypeParams": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ },
+ {
+ "Id": 1,
+ "Name": "message",
+ "Value": "Event occurred for Device Name: $name, Device IP Address: $ip",
+ "Type": "string",
+ "TypeParams": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "UndiscoveredTargets": [],
+ "State": true,
+ "Owner": 10069
+ }
+}
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CMON7011",
+ "RelatedProperties": [],
+ "Message": "Unable to create or modify the alert policy because an invalid value [To Email] is entered for the action Email.",
+ "MessageArgs": [
+ "[To Email]",
+ "Email"
+ ],
+ "Severity": "Warning",
+ "Resolution": "Enter a valid value for the action identified in the message and retry the operation."
+ }
+ ]
+ }
+}
+'''
+
+import csv
+import os
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import get_all_data_with_pagination, strip_substr_dict
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.common.dict_transformations import recursive_diff
+from datetime import datetime
+
+
+POLICIES_URI = "AlertService/AlertPolicies"
+MESSAGES_URI = "AlertService/AlertMessageDefinitions"
+ACTIONS_URI = "AlertService/AlertActionTemplates"
+SEVERITY_URI = "AlertService/AlertSeverities"
+DEVICES_URI = "DeviceService/Devices"
+GROUPS_URI = "GroupService/Groups"
+REMOVE_URI = "AlertService/Actions/AlertService.RemoveAlertPolicies"
+ENABLE_URI = "AlertService/Actions/AlertService.EnableAlertPolicies"
+DISABLE_URI = "AlertService/Actions/AlertService.DisableAlertPolicies"
+CATEGORY_URI = "AlertService/AlertCategories"
+SUCCESS_MSG = "Successfully {0}d the alert policy."
+NO_CHANGES_MSG = "No changes found to be applied."
+CHANGES_MSG = "Changes found to be applied."
+INVALID_TIME = "The specified {0} date or {0} time `{1}` to schedule the policy is not valid. Enter a valid date and time."
+END_START_TIME = "The end time `{0}` to schedule the policy must be greater than the start time `{1}`."
+CATEGORY_FETCH_FAILED = "Unable to retrieve the category details from OpenManage Enterprise."
+INVALID_TARGETS = "Specify target devices to apply the alert policy."
+INVALID_CATEGORY_MESSAGE = "Specify categories or message to create the alert policy."
+INVALID_SCHEDULE = "Specify a date and time to schedule the alert policy."
+INVALID_ACTIONS = "Specify alert actions for the alert policy."
+INVALID_SEVERITY = "Specify the severity to create the alert policy."
+MULTIPLE_POLICIES = "Unable to update the alert policies because the number of alert policies entered is more than " \
+ "one. The update policy operation supports only one alert policy at a time."
+DISABLED_ACTION = "Action {0} is disabled. Enable it before applying to the alert policy."
+ACTION_INVALID_PARAM = "The Action {0} attribute contains invalid parameter name {1}. The valid values are {2}."
+ACTION_INVALID_VALUE = "The Action {0} attribute contains invalid value for {1} for parameter name {2}. The valid " \
+ "values are {3}."
+ACTION_DIS_EXIST = "Action {0} does not exist."
+SUBCAT_IN_CATEGORY = "The subcategory {0} does not exist in the category {1}."
+CATEGORY_IN_CATALOG = "The category {0} does not exist in the catalog {1}."
+OME_DATA_MSG = "The {0} with the following {1} do not exist: {2}."
+CATALOG_DIS_EXIST = "The catalog {0} does not exist."
+CSV_PATH = "The message file {0} does not exist."
+DEFAULT_POLICY_DELETE = "The following default policies cannot be deleted: {0}."
+POLICY_ENABLE_MISSING = "Unable to {0} the alert policies {1} because the policy names are invalid. Enter the valid " \
+ "alert policy names and retry the operation."
+NO_POLICY_EXIST = "The alert policy does not exist."
+SEPARATOR = ", "
+
+
+def get_alert_policies(rest_obj, name_list):
+ report = get_all_data_with_pagination(rest_obj, POLICIES_URI)
+ all_policies = report.get("report_list", [])
+ policies = []
+ nameset = set(name_list)
+ for policy in all_policies:
+ if policy.get("Name") in nameset:
+ policies.append(policy)
+ return policies
+
+
+def get_items_to_remove(filter_param, return_param_tuple, return_dict, all_items, mset):
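+ # For every item whose filter_param value is still pending in mset, collect the
+ # requested return parameters into return_dict and return the set of matched
+ # keys so the caller can drop them from the pending set.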
+ collector = set()
+ for dev in all_items:
+ k = dev.get(filter_param)
+ if k in mset:
+ for v in return_param_tuple:
+ return_dict[v].append(dev.get(v))
+ collector.add(k)
+ return collector
+
+
+def validate_ome_data(module, rest_obj, item_list, filter_param, return_param_tuple, ome_uri, item_name="Items"):
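+ # Resolve item_list against the OME collection at ome_uri and collect the
+ # requested return parameters for each match. When fewer names are pending than
+ # there are result pages, issue per-item $filter queries; otherwise walk the
+ # @odata.nextLink pages. Exits the module if any requested item is not found.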
+ mset = set(item_list)
+ return_dict = {v: [] for v in return_param_tuple}
+ # can be further optimized if len(mset) == 1
+ resp = rest_obj.invoke_request("GET", ome_uri)
+ all_items = resp.json_data.get("value", [])
+ dvdr = len(all_items) if len(all_items) else 100
+ collector = get_items_to_remove(filter_param, return_param_tuple, return_dict, all_items, mset)
+ mset = mset - collector
+ all_item_count = resp.json_data.get("@odata.count")
+ next_link = resp.json_data.get("@odata.nextLink")
+ if mset and next_link:
+ if len(mset) < (all_item_count // dvdr):
+ for item_id in mset:
+ query_param = {"$filter": f"{filter_param} eq '{item_id}'"}
+ resp = rest_obj.invoke_request('GET', ome_uri, query_param=query_param)
+ one_item = resp.json_data.get("value", [])
+ collector = collector | get_items_to_remove(filter_param, return_param_tuple, return_dict, one_item, mset)
+ mset = mset - collector
+ else:
+ while next_link and mset:
+ resp = rest_obj.invoke_request('GET', next_link.lstrip("/api"))
+ all_items = resp.json_data.get("value", [])
+ collector = get_items_to_remove(filter_param, return_param_tuple, return_dict, all_items, mset)
+ mset = mset - collector
+ next_link = resp.json_data.get("@odata.nextLink", None)
+ if mset:
+ module.exit_json(failed=True,
+ msg=OME_DATA_MSG.format(item_name, filter_param, SEPARATOR.join(mset)))
+ ret_list = [(return_dict[id]) for id in return_param_tuple]
+ return tuple(ret_list)
+
+
+def get_target_payload(module, rest_obj):
+ target_payload = {'AllTargets': False,
+ 'DeviceTypes': [],
+ 'Devices': [],
+ 'Groups': [],
+ 'UndiscoveredTargets': []}
+ mparams = module.params
+ target_provided = False
+ if mparams.get('all_devices'):
+ target_payload['AllTargets'] = True
+ target_provided = True
+ elif mparams.get('any_undiscovered_devices'):
+ target_payload['UndiscoveredTargets'] = ["ALL_UNDISCOVERED_TARGETS"]
+ target_provided = True
+ elif mparams.get('specific_undiscovered_devices'):
+ target_payload['UndiscoveredTargets'] = list(set(module.params.get('specific_undiscovered_devices')))
+ target_payload['UndiscoveredTargets'].sort()
+ target_provided = True
+ elif mparams.get('device_service_tag'):
+ devicetype, deviceids = validate_ome_data(module, rest_obj, mparams.get('device_service_tag'),
+ 'DeviceServiceTag', ('Type', 'Id'), DEVICES_URI, 'devices')
+ target_payload['Devices'] = deviceids
+ target_payload['Devices'].sort()
+ target_payload['DeviceTypes'] = list(set(devicetype))
+ target_payload['DeviceTypes'].sort()
+ target_provided = True
+ elif mparams.get('device_group'):
+ groups = validate_ome_data(module, rest_obj, mparams.get('device_group'), 'Name', ('Id',), GROUPS_URI, 'groups')
+ target_payload['Groups'] = groups[0]
+ target_payload['Groups'].sort()
+ target_provided = True
+ if not target_provided:
+ target_payload = {}
+ return target_payload
+
+
+def get_category_data_tree(rest_obj):
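+ # Build a nested lookup of the alert categories:
+ # {catalog name: {category name: {category id: {subcategory name: subcategory id}}}}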
+ resp = rest_obj.invoke_request("GET", CATEGORY_URI)
+ cat_raw = resp.json_data.get("value", [])
+ cat_dict = dict(
+ (category.get("Name"),
+ dict((y.get("Name"),
+ {y.get("Id"): dict((z.get('Name'), z.get('Id')
+ ) for z in y.get("SubCategoryDetails"))}
+ ) for y in category.get("CategoriesDetails")
+ )
+ ) for category in cat_raw
+ )
+ return cat_dict
+
+
+def get_all_actions(rest_obj):
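+ # Map each action template name to its Id, Disabled flag, default parameter
+ # values and, per parameter, the allowed values ("true"/"false" for booleans,
+ # an empty list for free-form strings, otherwise the TemplateParameterTypeDetails values).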
+ resp = rest_obj.invoke_request("GET", ACTIONS_URI)
+ actions = resp.json_data.get("value", [])
+ cmp_actions = dict((x.get("Name"), {"Id": x.get("Id"),
+ "Disabled": x.get("Disabled"),
+ "Parameters": dict((y.get("Name"), y.get("Value")) for y in x.get("ParameterDetails")),
+ "Type": dict((y.get("Name"),
+ ["true", "false"]
+ if y.get("Type") == "boolean"
+ else [z.get("Value") for z in y.get("TemplateParameterTypeDetails")
+ if y.get("Type") != "string"]) for y in x.get("ParameterDetails"))
+ }
+ ) for x in actions)
+ return cmp_actions
+
+
+def validate_time(module, time, time_format, time_type):
+ try:
+ ftime = datetime.strptime(time, time_format)
+ except ValueError:
+ module.exit_json(failed=True, msg=INVALID_TIME.format(time_type, time))
+ return ftime
+
+
+def get_ftime(module, inp_schedule, time_type, time_interval):
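+ # Build the StartTime/EndTime string ("YYYY-MM-DD HH:MM:SS.fff") for the schedule
+ # payload, defaulting the time portion to 00:00 when no time interval is set, and
+ # return it along with the parsed datetime (None when no date was supplied) so the
+ # caller can compare the start and end of the schedule.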
+ def_time = "00:00"
+ time_format = "%Y-%m-%d %H:%M:%S.%f"
+ hhmm = inp_schedule.get(f"time_{time_type}") if time_interval else def_time
+ date_x = inp_schedule.get(f"date_{time_type}")
+ time_x = None
+ if date_x:
+ dtime = f"{date_x} {hhmm}:00.000"
+ time_x = validate_time(module, dtime, time_format, time_type)
+ elif time_interval:
+ dtime = f"{hhmm}:00.000"
+ else:
+ dtime = ""
+ return dtime, time_x
+
+
+def get_schedule_payload(module):
+ schedule_payload = {}
+ inp_schedule = module.params.get('date_and_time')
+ if inp_schedule:
+ time_interval = bool(inp_schedule.get('time_interval'))
+ schedule_payload['Interval'] = time_interval
+ schedule_payload["StartTime"], start_time_x = get_ftime(module, inp_schedule, "from", time_interval)
+ schedule_payload["EndTime"], end_time_x = get_ftime(module, inp_schedule, "to", time_interval)
+ if inp_schedule.get('date_to') and end_time_x < start_time_x:
+ module.exit_json(failed=True, msg=END_START_TIME.format(end_time_x, start_time_x))
+ weekdays = {'monday': 'mon', 'tuesday': 'tue', 'wednesday': 'wed', 'thursday': 'thu', 'friday': 'fri',
+ 'saturday': 'sat', 'sunday': 'sun'}
+ inp_week_list = ['*']
+ cron_sep = ","
+ if inp_schedule.get('days'):
+ week_order = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
+ inp_week_list = sorted(list(set(inp_schedule.get('days'))), key=week_order.index)
+ schedule_payload["CronString"] = f"* * * ? * {cron_sep.join([weekdays.get(x, '*') for x in inp_week_list])} *"
+ return {"Schedule": schedule_payload} if schedule_payload else {}
+
+
+def create_action_payload(inp_k, inp_val, ref_actions, module):
+ if ref_actions.get(inp_k).get('Disabled'):
+ module.exit_json(failed=True, msg=DISABLED_ACTION.format(inp_k))
+ pld = {
+ 'TemplateId': ref_actions.get(inp_k).get('Id'),
+ 'Name': inp_k,
+ 'ParameterDetails': {}
+ }
+ diff = set(inp_val.keys()) - set(ref_actions.get(inp_k).get('Parameters').keys())
+ if diff:
+ module.exit_json(failed=True,
+ msg=ACTION_INVALID_PARAM.format(
+ inp_k, SEPARATOR.join(diff), SEPARATOR.join(ref_actions.get(inp_k).get('Parameters').keys())))
+ for sub_k, sub_val in inp_val.items():
+ valid_values = ref_actions.get(inp_k).get('Type').get(sub_k)
+ if valid_values:
+ if str(sub_val).lower() not in valid_values:
+ module.exit_json(failed=True, msg=ACTION_INVALID_VALUE.format(inp_k, sub_val, sub_k, SEPARATOR.join(valid_values)))
+ else:
+ inp_val[sub_k] = str(sub_val).lower() if str(sub_val).lower() in ("true", "false") else sub_val
+ pld['ParameterDetails'] = inp_val
+ return pld
+
+
+def get_actions_payload(module, rest_obj):
+ action_payload = {}
+ inp_actions = module.params.get('actions')
+ if inp_actions:
+ ref_actions = get_all_actions(rest_obj)
+ inp_dict = {x.get("action_name"): {y.get("name"): y.get("value")
+ for y in x.get("parameters", [])} for x in inp_actions}
+ if 'Ignore' in inp_dict:
+ action_payload['Ignore'] = {'TemplateId': ref_actions.get('Ignore').get('Id'),
+ 'Name': "Ignore",
+ 'ParameterDetails': {}}
+ else:
+ for inp_k, inp_val in inp_dict.items():
+ if inp_k in ref_actions:
+ action_payload[inp_k] = create_action_payload(inp_k, inp_val, ref_actions, module)
+ else:
+ module.exit_json(failed=True, msg=ACTION_DIS_EXIST.format(inp_k))
+ return {"Actions": action_payload} if action_payload else {}
+
+
+def load_subcategory_data(module, inp_sub_cat_list, sub_cat_dict, key_id, payload_cat, payload_subcat, inp_category):
+ if inp_sub_cat_list:
+ for sub_cat in inp_sub_cat_list:
+ if sub_cat in sub_cat_dict:
+ payload_cat.append(key_id)
+ payload_subcat.append(
+ sub_cat_dict.get(sub_cat))
+ else:
+ module.exit_json(failed=True, msg=SUBCAT_IN_CATEGORY.format(sub_cat, inp_category.get('category_name')))
+ else:
+ payload_cat.append(key_id)
+ payload_subcat.append(0)
+
+
+def load_category_data(module, catalog_name, category_list, category_det, payload_cat, payload_subcat):
+ if category_list:
+ for inp_category in category_list:
+ if inp_category.get('category_name') in category_det:
+ resp_category_dict = category_det.get(inp_category.get('category_name'))
+ key_id = list(resp_category_dict.keys())[0]
+ sub_cat_dict = resp_category_dict.get(key_id)
+ inp_sub_cat_list = inp_category.get('sub_category_names')
+ load_subcategory_data(module, inp_sub_cat_list, sub_cat_dict, key_id, payload_cat, payload_subcat, inp_category)
+ else:
+ module.exit_json(failed=True, msg=CATEGORY_IN_CATALOG.format(inp_category.get('category_name'), catalog_name))
+ else:
+ payload_cat.append(0)
+ payload_subcat.append(0)
+
+
+def get_category_payloadlist(module, inp_catalog_list, cdict_ref):
+ payload_cat_list = []
+ for inp_catalog in inp_catalog_list:
+ new_dict = {}
+ catalog_name = inp_catalog.get('catalog_name')
+ if catalog_name in cdict_ref:
+ new_dict["CatalogName"] = catalog_name
+ payload_cat = []
+ category_det = cdict_ref.get(catalog_name)
+ payload_subcat = []
+ category_list = inp_catalog.get('catalog_category')
+ load_category_data(module, catalog_name, category_list, category_det, payload_cat, payload_subcat)
+ new_dict["Categories"] = payload_cat
+ new_dict['SubCategories'] = payload_subcat
+ else:
+ module.exit_json(failed=True, msg=CATALOG_DIS_EXIST.format(catalog_name))
+ payload_cat_list.append(new_dict)
+ return payload_cat_list
+
+
+def get_category_payload(module, rest_obj):
+ inp_catalog_list = module.params.get('category')
+ cdict_ref = get_category_data_tree(rest_obj)
+ if not cdict_ref:
+ module.exit_json(failed=True, msg=CATEGORY_FETCH_FAILED)
+ payload_cat_list = get_category_payloadlist(module, inp_catalog_list, cdict_ref)
+ return payload_cat_list
+
+
+def get_message_payload(module):
+ mlist = []
+ if module.params.get('message_file'):
+ csvpath = module.params.get('message_file')
+ if not os.path.isfile(csvpath):
+ module.exit_json(
+ failed=True, msg=CSV_PATH.format(csvpath))
+ with open(csvpath) as csvfile:
+ spamreader = csv.reader(csvfile)
+ for row in spamreader:
+ mlist.extend(row)
+ if mlist[0].lower().startswith('message'):
+ mlist.pop(0)
+ elif module.params.get('message_ids'):
+ mlist = module.params.get('message_ids')
+ return mlist
+
+
+def get_category_or_message(module, rest_obj):
+ cat_payload = {"Catalogs": {},
+ "MessageIds": []}
+ cat_msg_provided = False
+ if module.params.get('category'):
+ payload_cat_list = get_category_payload(module, rest_obj)
+ cat_dict = dict((x.get('CatalogName'), x) for x in payload_cat_list)
+ cat_msg_provided = True
+ cat_payload['Catalogs'] = cat_dict
+ else:
+ mlist = get_message_payload(module)
+ if mlist:
+ validate_ome_data(module, rest_obj, mlist, 'MessageId', ('MessageId',), MESSAGES_URI, 'messages')
+ cat_msg_provided = True
+ cat_payload['MessageIds'] = list(set(mlist))
+ cat_payload['MessageIds'].sort()
+ if not cat_msg_provided:
+ cat_payload = {}
+ return cat_payload
+
+
+def get_severity_payload(module, rest_obj):
+ try:
+ resp = rest_obj.invoke_request("GET", SEVERITY_URI)
+ severity_dict = dict((x.get('Name').lower(), x.get('Id'))
+ for x in resp.json_data.get("Value"))
+ except Exception:
+ severity_dict = {"unknown": 1, "info": 2,
+ "normal": 4, "warning": 8, "critical": 16}
+ inp_sev_list = module.params.get('severity')
+ sev_payload = {}
+ if inp_sev_list:
+ if 'all' in inp_sev_list:
+ sev_payload = {"Severities": list(severity_dict.values())}
+ else:
+ sev_payload = {"Severities": [
+ severity_dict.get(x) for x in inp_sev_list]}
+ sev_payload['Severities'].sort()
+ return sev_payload
+
+
+def transform_existing_policy_data(policy):
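+ # Normalize an existing policy's PolicyData in place into the shape produced by
+ # the payload builders (plain target addresses, Actions/Catalogs keyed by name,
+ # sorted lists, unquoted message IDs) so that recursive_diff can compare them.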
+ pdata = policy.get('PolicyData')
+ undiscovered = pdata.get('UndiscoveredTargets')
+ if undiscovered:
+ pdata['UndiscoveredTargets'] = [x.get('TargetAddress') for x in undiscovered]
+ actions = pdata.get('Actions')
+ if actions:
+ for action in actions:
+ if action.get('Name') == "RemoteCommand":
+ # Special case for RemoteCommand: OME appends a trailing '1' to the parameter name
+ # ("remotecommandaction") after every POST call, so strip it before comparing.
+ action['ParameterDetails'] = dict((str(act_param.get('Name')).rstrip('1'), act_param.get('Value'))
+ for act_param in action.get('ParameterDetails', []))
+ else:
+ action['ParameterDetails'] = dict((act_param.get('Name'), act_param.get('Value'))
+ for act_param in action.get('ParameterDetails', []))
+ action.pop('Id', None)
+ pdata['Actions'] = dict((x.get('Name'), x) for x in actions)
+ catalogs = pdata.get('Catalogs')
+ pdata['Catalogs'] = dict((x.get('CatalogName'), x) for x in catalogs)
+ # for Devices, DeviceTypes, Groups, Severities
+ for pol_data in pdata.values():
+ if isinstance(pol_data, list):
+ pol_data.sort()
+ messages = pdata.get('MessageIds', [])
+ pdata['MessageIds'] = [m.strip("'") for m in messages]
+
+
+def format_payload(policy):
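+ # Convert the keyed forms used for comparison back into the list shapes that the OME
+ # alert policy API expects before the POST/PUT request.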
+ pdata = policy.get('PolicyData')
+ undiscovered = pdata.get('UndiscoveredTargets')
+ if undiscovered:
+ pdata['UndiscoveredTargets'] = [({"TargetAddress": x}) for x in undiscovered]
+ actions = pdata.get('Actions')
+ if actions:
+ for action in actions.values():
+ action['ParameterDetails'] = [
+ {"Name": k, "Value": v} for k, v in action.get('ParameterDetails', {}).items()]
+ pdata['Actions'] = list(actions.values())
+ catalogs = pdata.get('Catalogs')
+ pdata['Catalogs'] = list(catalogs.values())
+
+
+def compare_policy_payload(module, rest_obj, policy):
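+ # Build the requested payload from the module parameters, diff it against the existing
+ # (normalized) policy, merge any differences into 'policy' in place, and return a
+ # non-zero count when an update is required.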
+ diff = 0
+ new_payload = {}
+ new_policy_data = {}
+ new_payload["PolicyData"] = new_policy_data
+ transform_existing_policy_data(policy)
+ payload_items = []
+ payload_items.append(get_target_payload(module, rest_obj))
+ payload_items.append(get_category_or_message(module, rest_obj))
+ payload_items.append(get_actions_payload(module, rest_obj))
+ payload_items.append(get_schedule_payload(module))
+ payload_items.append(get_severity_payload(module, rest_obj))
+ for payload in payload_items:
+ if payload:
+ new_policy_data.update(payload)
+ diff_tuple = recursive_diff(new_payload['PolicyData'], policy['PolicyData'])
+ if diff_tuple and diff_tuple[0]:
+ diff = diff + 1
+ policy['PolicyData'].update(diff_tuple[0])
+ if module.params.get('new_name'):
+ new_payload['Name'] = module.params.get('new_name')
+ if module.params.get('description'):
+ new_payload['Description'] = module.params.get('description')
+ if module.params.get('enable') is not None:
+ new_payload['Enabled'] = module.params.get('enable')
+ policy = strip_substr_dict(policy)
+ new_payload.pop('PolicyData', None)
+ diff_tuple = recursive_diff(new_payload, policy)
+ if diff_tuple and diff_tuple[0]:
+ diff = diff + 1
+ policy.update(diff_tuple[0])
+ return diff
+
+
+def get_policy_data(module, rest_obj):
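+ # Policy creation requires targets, a category or message IDs, a schedule, actions, and
+ # severities; exit with a specific message when any of these is missing or invalid.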
+ policy_data = {}
+ target = get_target_payload(module, rest_obj)
+ if not target:
+ module.exit_json(failed=True, msg=INVALID_TARGETS)
+ policy_data.update(target)
+ cat_msg = get_category_or_message(module, rest_obj)
+ if not cat_msg:
+ module.exit_json(failed=True, msg=INVALID_CATEGORY_MESSAGE)
+ policy_data.update(cat_msg)
+ schedule = get_schedule_payload(module)
+ if not schedule:
+ module.exit_json(failed=True, msg=INVALID_SCHEDULE)
+ policy_data.update(schedule)
+ actions = get_actions_payload(module, rest_obj)
+ if not actions:
+ module.exit_json(failed=True, msg=INVALID_ACTIONS)
+ policy_data.update(actions)
+ sev_payload = get_severity_payload(module, rest_obj)
+ if not sev_payload.get('Severities'):
+ module.exit_json(failed=True, msg=INVALID_SEVERITY)
+ policy_data.update(sev_payload)
+ return policy_data
+
+
+def remove_policy(module, rest_obj, policies):
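+ # Built-in (default) policies cannot be deleted; fail if any requested policy is one.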
+ id_list = [x.get("Id")
+ for x in policies if x.get("DefaultPolicy") is False]
+ if len(id_list) != len(policies):
+ module.exit_json(failed=True,
+ msg=DEFAULT_POLICY_DELETE.format(SEPARATOR.join([x.get('Name') for x in policies if x.get('DefaultPolicy')])))
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_MSG, changed=True)
+ rest_obj.invoke_request("POST", REMOVE_URI, data={
+ "AlertPolicyIds": id_list})
+ module.exit_json(changed=True, msg=SUCCESS_MSG.format("delete"))
+
+
+def enable_toggle_policy(module, rest_obj, policies):
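+ # Toggle only the policies whose current 'Enabled' state differs from the requested state.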
+ enabler = module.params.get('enable')
+ id_list = [x.get("Id") for x in policies if x.get("Enabled") is not enabler]
+ if not id_list:
+ module.exit_json(msg=NO_CHANGES_MSG)
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_MSG, changed=True)
+ uri = ENABLE_URI if enabler else DISABLE_URI
+ rest_obj.invoke_request("POST", uri, data={"AlertPolicyIds": id_list})
+ module.exit_json(changed=True, msg=SUCCESS_MSG.format("enable" if enabler else "disable"))
+
+
+def update_policy(module, rest_obj, policy):
+ diff = compare_policy_payload(module, rest_obj, policy)
+ if not diff:
+ module.exit_json(msg=NO_CHANGES_MSG)
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_MSG, changed=True)
+ format_payload(policy)
+ resp = rest_obj.invoke_request("PUT", f"{POLICIES_URI}({policy.get('Id')})", data=policy)
+ module.exit_json(changed=True, msg=SUCCESS_MSG.format("update"),
+ status=resp.json_data)
+
+
+def create_policy(module, rest_obj):
+ create_payload = {}
+ policy_data = get_policy_data(module, rest_obj)
+ create_payload['PolicyData'] = policy_data
+ create_payload['Name'] = module.params.get('name')[0]
+ create_payload['Description'] = module.params.get('description')
+ # 'enable' defaults to true for a newly created policy when it is not specified.
+ enable = module.params.get('enable')
+ create_payload['Enabled'] = enable if enable is not None else True
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_MSG, changed=True)
+ format_payload(create_payload)
+ resp = rest_obj.invoke_request("POST", POLICIES_URI, data=create_payload)
+ module.exit_json(changed=True, msg=SUCCESS_MSG.format(
+ "create"), status=resp.json_data)
+
+
+def handle_policy_enable(module, rest_obj, policies, name_list):
+ if len(policies) == len(name_list):
+ enable_toggle_policy(module, rest_obj, policies)
+ else:
+ invalid_policies = set(name_list) - set(x.get("Name") for x in policies)
+ enabler = module.params.get('enable')
+ module.exit_json(failed=True, msg=POLICY_ENABLE_MISSING.format("enable" if enabler else "disable", SEPARATOR.join(invalid_policies)))
+
+
+def handle_absent_state(module, rest_obj, policies):
+ if policies:
+ remove_policy(module, rest_obj, policies)
+ else:
+ module.exit_json(msg=NO_POLICY_EXIST)
+
+
+def handle_present_state(module, rest_obj, policies, name_list, present_args):
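+ # If only 'enable' is provided along with the policy name(s), toggle the matching
+ # policies; otherwise exactly one policy name is expected and is updated or created.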
+ present_args.remove('enable')
+ enable = module.params.get('enable')
+ if not any(module.params.get(prm) is not None for prm in present_args) and enable is not None:
+ handle_policy_enable(module, rest_obj, policies, name_list)
+ if len(name_list) > 1:
+ module.exit_json(failed=True, msg=MULTIPLE_POLICIES)
+ if policies:
+ update_policy(module, rest_obj, policies[0])
+ else:
+ create_policy(module, rest_obj)
+
+
+def main():
+ specs = {
+ "name": {'type': 'list', 'elements': 'str', 'required': True},
+ "state": {'default': 'present', 'choices': ['present', 'absent'], 'type': 'str'},
+ "enable": {'type': 'bool'},
+ "new_name": {'type': 'str'},
+ "description": {'type': 'str'},
+ "device_service_tag": {'type': 'list', 'elements': 'str'},
+ "device_group": {'type': 'list', 'elements': 'str'},
+ "specific_undiscovered_devices": {'type': 'list', 'elements': 'str'},
+ "any_undiscovered_devices": {'type': 'bool'},
+ "all_devices": {'type': 'bool'},
+ "category": {'type': 'list', 'elements': 'dict',
+ 'options': {'catalog_name': {'type': 'str', 'required': True},
+ 'catalog_category': {'type': 'list', 'elements': 'dict',
+ 'options': {'category_name': {'type': 'str'},
+ 'sub_category_names': {'type': 'list', 'elements': 'str'}
+ },
+ }
+ }
+ },
+ "message_ids": {'type': 'list', 'elements': 'str'},
+ "message_file": {'type': 'path'},
+ "date_and_time": {'type': 'dict',
+ 'options': {'date_from': {'type': 'str', 'required': True},
+ 'date_to': {'type': 'str'},
+ 'time_from': {'type': 'str'},
+ 'time_to': {'type': 'str'},
+ 'days': {'type': 'list', 'elements': 'str',
+ 'choices': ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']},
+ 'time_interval': {'type': 'bool'}
+ },
+ 'required_if': [['time_interval', True, ('time_from', 'time_to')]]
+ },
+ "severity": {'type': 'list', 'elements': 'str', 'choices': ['info', 'normal', 'warning', 'critical', 'unknown', 'all']},
+ "actions": {'type': 'list', 'elements': 'dict',
+ 'options': {'action_name': {'type': 'str', 'required': True},
+ 'parameters': {'type': 'list', 'elements': 'dict', 'default': [],
+ 'options': {'name': {'type': 'str'},
+ 'value': {'type': 'str'}}
+ }
+ }
+ }
+ }
+ specs.update(ome_auth_params)
+ present_args = ['enable', 'new_name', 'description', 'device_service_tag', 'device_group',
+ 'specific_undiscovered_devices', 'any_undiscovered_devices', 'all_devices',
+ 'category', 'message_ids', 'message_file', 'date_and_time', 'severity', 'actions']
+ module = AnsibleModule(
+ argument_spec=specs,
+ required_if=[['state', 'present', present_args, True]],
+ mutually_exclusive=[('device_service_tag', 'device_group', 'any_undiscovered_devices', 'specific_undiscovered_devices', 'all_devices',),
+ ('message_ids', 'message_file', 'category',)],
+ supports_check_mode=True)
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ state = module.params.get('state')
+ name_list = list(set(module.params.get('name')))
+ policies = get_alert_policies(rest_obj, name_list)
+ if state == 'absent':
+ handle_absent_state(module, rest_obj, policies)
+ else:
+ handle_present_state(module, rest_obj, policies, name_list, present_args)
+ except HTTPError as err:
+ module.exit_json(failed=True, msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError) as err:
+ module.exit_json(failed=True, msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_actions_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_actions_info.py
new file mode 100644
index 000000000..0d1f0c726
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_actions_info.py
@@ -0,0 +1,290 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_alert_policies_actions_info
+short_description: Get information on actions of alert policies.
+version_added: "8.2.0"
+description:
+ - This module retrieves the information on actions of alert policies for OpenManage Enterprise
+ and OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Kritika Bhateja (@Kritika-Bhateja-03)"
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise
+ or OpenManage Enterprise Modular.
+ - This module supports both IPv4 and IPv6 addresses.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Get action details of all alert policies.
+ dellemc.openmanage.ome_alert_policies_actions_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+'''
+
+RETURN = r'''
+---
+actions:
+ type: list
+ description: Returns the alert policies action information collected from the device.
+ returned: success
+ sample: [
+ {
+ "Name": "Email",
+ "Description": "Email",
+ "Disabled": false,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "subject",
+ "Value": "Device Name: $name, Device IP Address: $ip, Severity: $severity",
+ "Type": "string",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ },
+ {
+ "Id": 2,
+ "Name": "to",
+ "Value": "",
+ "Type": "string",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ },
+ {
+ "Id": 3,
+ "Name": "from",
+ "Value": "admin1@dell.com",
+ "Type": "string",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ },
+ {
+ "Id": 4,
+ "Name": "message",
+ "Value": "Event occurred for Device Name: $name,
+ Device IP Address: $ip, Service Tag: $identifier, UTC Time: $time, Severity: $severity, Message ID: $messageId, $message",
+ "Type": "string",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "Id": 60,
+ "Name": "Trap",
+ "Description": "Trap",
+ "Disabled": false,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "localhost:162",
+ "Value": "true",
+ "Type": "boolean",
+ "TemplateParameterTypeDetails": []
+ }
+ ]
+ },
+ {
+ "Id": 90,
+ "Name": "Syslog",
+ "Description": "Syslog",
+ "Disabled": false,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "localhost.scomdev.com:555",
+ "Value": "true",
+ "Type": "boolean",
+ "TemplateParameterTypeDetails": []
+ },
+ {
+ "Id": 2,
+ "Name": "localhost.scomdev.com:555",
+ "Value": "true",
+ "Type": "boolean",
+ "TemplateParameterTypeDetails": []
+ }
+ ]
+ },
+ {
+ "Id": 100,
+ "Name": "Ignore",
+ "Description": "Ignore",
+ "Disabled": false,
+ "ParameterDetails": []
+ },
+ {
+ "Id": 70,
+ "Name": "SMS",
+ "Description": "SMS",
+ "Disabled": false,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "to",
+ "Value": "",
+ "Type": "string",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "Id": 110,
+ "Name": "PowerControl",
+ "Description": "Power Control Action Template",
+ "Disabled": false,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "powercontrolaction",
+ "Value": "poweroff",
+ "Type": "singleSelect",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "option",
+ "Value": "powercycle"
+ },
+ {
+ "Name": "option",
+ "Value": "poweroff"
+ },
+ {
+ "Name": "option",
+ "Value": "poweron"
+ },
+ {
+ "Name": "option",
+ "Value": "gracefulshutdown"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "Id": 111,
+ "Name": "RemoteCommand",
+ "Description": "RemoteCommand",
+ "Disabled": true,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "remotecommandaction",
+ "Value": null,
+ "Type": "singleSelect",
+ "TemplateParameterTypeDetails": []
+ }
+ ]
+ },
+ {
+ "Id": 112,
+ "Name": "Mobile",
+ "Description": "Mobile",
+ "Disabled": false,
+ "ParameterDetails": []
+ }
+ ]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+msg:
+ description: Status of the alert policies actions fetch operation.
+ returned: always
+ type: str
+ sample: Successfully retrieved alert policies actions information.
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import remove_key
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import get_all_data_with_pagination
+
+ACTIONS_URI = "AlertService/AlertActionTemplates"
+SUCCESSFUL_MSG = "Successfully retrieved alert policies actions information."
+EMPTY_ALERT_POLICY_ACTION_MSG = "No alert policies action information was found."
+
+
+def main():
+ """ function to retrieve the information on actions of alert policies """
+ specs = ome_auth_params
+ module = AnsibleModule(
+ argument_spec=specs,
+ supports_check_mode=True)
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ actions_info = get_all_data_with_pagination(rest_obj, ACTIONS_URI)
+ if not actions_info.get("report_list", []):
+ module.exit_json(msg=EMPTY_ALERT_POLICY_ACTION_MSG, actions=[])
+ actions = remove_key(actions_info['report_list'])
+ module.exit_json(msg=SUCCESSFUL_MSG, actions=actions)
+ except HTTPError as err:
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError) as err:
+ module.exit_json(msg=str(err), failed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_category_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_category_info.py
new file mode 100644
index 000000000..6d3151fe9
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_category_info.py
@@ -0,0 +1,316 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_alert_policies_category_info
+short_description: Retrieves information of all OME alert policy categories.
+version_added: "8.2.0"
+description: This module retrieves all the alert policy categories for OpenManage Enterprise and OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Jagadeesh N V(@jagadeeshnv)"
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
+ - This module supports IPv4 and IPv6 addresses.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Retrieve information about all the OME alert policy categories
+ dellemc.openmanage.ome_alert_policies_category_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Status of the alert policies category fetch operation.
+ returned: always
+ sample: "Successfully retrieved alert policies category information."
+categories:
+ type: list
+ description: Information about the alert categories.
+ returned: always
+ sample: [{
+ "CategoriesDetails": [
+ {
+ "CatalogName": "Application",
+ "Id": 5,
+ "Name": "Configuration",
+ "SubCategoryDetails": [
+ {
+ "Description": "Application",
+ "Id": 85,
+ "Name": "Application"
+ },
+ {
+ "Description": "Users",
+ "Id": 35,
+ "Name": "Users"
+ }
+ ]
+ },
+ {
+ "CatalogName": "Application",
+ "Id": 7,
+ "Name": "Miscellaneous",
+ "SubCategoryDetails": [
+ {
+ "Description": "Miscellaneous",
+ "Id": 20,
+ "Name": "Miscellaneous"
+ }
+ ]
+ },
+ {
+ "CatalogName": "Application",
+ "Id": 2,
+ "Name": "Storage",
+ "SubCategoryDetails": [
+ {
+ "Description": "Devices",
+ "Id": 90,
+ "Name": "Devices"
+ }
+ ]
+ },
+ {
+ "CatalogName": "Application",
+ "Id": 3,
+ "Name": "Updates",
+ "SubCategoryDetails": [
+ {
+ "Description": "Application",
+ "Id": 85,
+ "Name": "Application"
+ },
+ {
+ "Description": "Firmware",
+ "Id": 112,
+ "Name": "Firmware"
+ }
+ ]
+ }
+ ],
+ "IsBuiltIn": true,
+ "Name": "Application"
+ },
+ {
+ "CategoriesDetails": [
+ {
+ "CatalogName": "Dell Storage",
+ "Id": 2,
+ "Name": "Storage",
+ "SubCategoryDetails": [
+ {
+ "Description": "Other",
+ "Id": 7700,
+ "Name": "Other"
+ }
+ ]
+ },
+ {
+ "CatalogName": "Dell Storage",
+ "Id": 1,
+ "Name": "System Health",
+ "SubCategoryDetails": [
+ {
+ "Description": "Other",
+ "Id": 7700,
+ "Name": "Other"
+ },
+ {
+ "Description": "Storage",
+ "Id": 18,
+ "Name": "Storage"
+ }
+ ]
+ }
+ ],
+ "IsBuiltIn": true,
+ "Name": "Dell Storage"
+ },
+ {
+ "CategoriesDetails": [
+ {
+ "CatalogName": "iDRAC",
+ "Id": 4,
+ "Name": "Audit",
+ "SubCategoryDetails": [
+ {
+ "Description": "Auto System Reset",
+ "Id": 41,
+ "Name": "Auto System Reset"
+ },
+ {
+ "Description": "UEFI Event",
+ "Id": 55,
+ "Name": "UEFI Event"
+ },
+ {
+ "Description": "User Tracking",
+ "Id": 56,
+ "Name": "User Tracking"
+ }
+ ]
+ },
+ {
+ "CatalogName": "iDRAC",
+ "Id": 5,
+ "Name": "Configuration",
+ "SubCategoryDetails": [
+ {
+ "Description": "Auto-Discovery",
+ "Id": 49,
+ "Name": "Auto-Discovery"
+ },
+ {
+ "Description": "vFlash Event",
+ "Id": 66,
+ "Name": "vFlash Event"
+ },
+ {
+ "Description": "Virtual Console",
+ "Id": 7,
+ "Name": "Virtual Console"
+ }
+ ]
+ },
+ {
+ "CatalogName": "iDRAC",
+ "Id": 2,
+ "Name": "Storage",
+ "SubCategoryDetails": [
+ {
+ "Description": "Battery Event",
+ "Id": 108,
+ "Name": "Battery Event"
+ },
+ {
+ "Description": "Virtual Disk",
+ "Id": 46,
+ "Name": "Virtual Disk"
+ }
+ ]
+ },
+ {
+ "CatalogName": "iDRAC",
+ "Id": 1,
+ "Name": "System Health",
+ "SubCategoryDetails": [
+ {
+ "Description": "Amperage",
+ "Id": 67,
+ "Name": "Amperage"
+ },
+ {
+ "Description": "Auto System Reset",
+ "Id": 41,
+ "Name": "Auto System Reset"
+ },
+ {
+ "Description": "Voltage",
+ "Id": 40,
+ "Name": "Voltage"
+ }
+ ]
+ },
+ {
+ "CatalogName": "iDRAC",
+ "Id": 6,
+ "Name": "Work Notes",
+ "SubCategoryDetails": [
+ {
+ "Description": "BIOS Management",
+ "Id": 54,
+ "Name": "BIOS Management"
+ }
+ ]
+ }
+ ],
+ "IsBuiltIn": true,
+ "Name": "iDRAC"
+ }
+]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to complete the request because the resource URI does not exist or is not implemented.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide
+ for more information about resource URI and its properties."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import remove_key
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import get_all_data_with_pagination
+
+ALERT_CATEGORY_URI = "AlertService/AlertCategories"
+SUCCESS_MSG = "Successfully retrieved alert policies category information."
+
+
+def get_formatted_categories(rest_obj):
+ report = get_all_data_with_pagination(rest_obj, ALERT_CATEGORY_URI)
+ categories = remove_key(report.get("report_list", []))
+ return categories
+
+
+def main():
+ specs = ome_auth_params
+ module = AnsibleModule(
+ argument_spec=specs,
+ supports_check_mode=True)
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ categories = get_formatted_categories(rest_obj)
+ module.exit_json(msg=SUCCESS_MSG, categories=categories)
+ except HTTPError as err:
+ module.exit_json(failed=True, msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError) as err:
+ module.exit_json(failed=True, msg=str(err))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_info.py
new file mode 100644
index 000000000..d9a97c070
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_info.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_alert_policies_info
+short_description: Retrieves information of one or more OME alert policies.
+version_added: "8.2.0"
+description:
+ - This module retrieves the information of alert policies for OpenManage Enterprise
+ and OpenManage Enterprise Modular.
+ - Returns a list with the information about a specific OME alert policy when the policy name is provided.
+ - Returns a list of all the OME alert policies with their information when the policy name is not provided.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ policy_name:
+ description: Name of the policy.
+ type: str
+requirements:
+ - "python >= 3.9.6"
+author: "Abhishek Sinha(@ABHISHEK-SINHA10)"
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise
+ or OpenManage Enterprise Modular.
+ - This module supports both IPv4 and IPv6 addresses.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = """
+---
+- name: Retrieve information about all OME alert policies.
+ dellemc.openmanage.ome_alert_policies_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Retrieve information about a specific OME alert policy using the policy name.
+ dellemc.openmanage.ome_alert_policies_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ policy_name: "Mobile Push Notification - Critical Alerts"
+"""
+
+RETURN = '''
+---
+msg:
+ type: str
+ description: Status of the alert policies info fetch operation.
+ returned: always
+ sample: "Successfully retrieved all the OME alert policies information."
+policies:
+ type: list
+ description: The retrieved information about the OME alert policies.
+ returned: success
+ sample: [
+ {
+ "Id": 10006,
+ "Name": "Mobile Push Notification - Critical Alerts",
+ "Description": "This policy is applicable to critical alerts. Associated actions will be taken when a critical alert is received.",
+ "Enabled": true,
+ "DefaultPolicy": true,
+ "PolicyData": {
+ "Catalogs": [],
+ "Severities": [
+ 16
+ ],
+ "MessageIds": [],
+ "Devices": [],
+ "DeviceTypes": [],
+ "Groups": [],
+ "AllTargets": false,
+ "Schedule": {
+ "StartTime": null,
+ "EndTime": null,
+ "CronString": null,
+ "Interval": false
+ },
+ "Actions": [
+ {
+ "Id": 5,
+ "Name": "Mobile",
+ "ParameterDetails": [],
+ "TemplateId": 112
+ }
+ ],
+ "UndiscoveredTargets": []
+ },
+ "State": true,
+ "Visible": true,
+ "Owner": null,
+ "Owner": null
+]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import remove_key
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+
+ALERT_POLICY_URI = "AlertService/AlertPolicies"
+MODULE_SUCCESS_MESSAGE_ALL = "Successfully retrieved all the OME alert policies information."
+MODULE_SUCCESS_MESSAGE_SPECIFIC = "Successfully retrieved {0} OME alert policy information."
+POLICY_NAME_NOT_FOUND_OR_EMPTY = "The OME alert policy name {0} provided does not exist or is empty."
+
+
+class OMEAlertPolicyInfo:
+
+ def __init__(self) -> None:
+ self.module = get_module_parameters()
+
+ def get_all_alert_policy_info(self, rest_obj) -> dict:
+ resp = rest_obj.invoke_request("GET", ALERT_POLICY_URI)
+ value = resp.json_data["value"]
+ output_all = {'msg': MODULE_SUCCESS_MESSAGE_ALL, 'value': remove_key(value)}
+ return output_all
+
+ def get_alert_policy_info(self, rest_obj) -> dict:
+ policy_name = self.module.params.get("policy_name")
+ if policy_name is not None:
+ output_not_found_or_empty = {'msg': POLICY_NAME_NOT_FOUND_OR_EMPTY.format(policy_name),
+ 'value': []}
+ if policy_name == "":
+ return output_not_found_or_empty
+ policies = self.get_all_alert_policy_info(rest_obj)
+ for each_element in policies["value"]:
+ if each_element["Name"] == policy_name:
+ output_specific = {'msg': MODULE_SUCCESS_MESSAGE_SPECIFIC.format(policy_name),
+ 'value': [each_element]}
+ return output_specific
+ return output_not_found_or_empty
+ return self.get_all_alert_policy_info(rest_obj)
+
+ def perform_module_operation(self) -> None:
+ try:
+ with RestOME(self.module.params, req_session=True) as rest_obj:
+ result = self.get_alert_policy_info(rest_obj)
+ self.module.exit_json(msg=result['msg'], policies=result['value'])
+ except HTTPError as err:
+ self.module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
+ except URLError as err:
+ self.module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError) as err:
+ self.module.exit_json(msg=str(err), failed=True)
+
+
+def get_module_parameters() -> AnsibleModule:
+ specs = {
+ "policy_name": {"type": 'str'}
+ }
+ specs.update(ome_auth_params)
+ module = AnsibleModule(argument_spec=specs,
+ supports_check_mode=True)
+ return module
+
+
+def main():
+ obj = OMEAlertPolicyInfo()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_message_id_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_message_id_info.py
new file mode 100644
index 000000000..577eac7d0
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_message_id_info.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_alert_policies_message_id_info
+short_description: Get message ID information of alert policies.
+version_added: "8.2.0"
+description:
+ - "This module retrieves the message ID information of alert policies for OpenManage Enterprise
+ and OpenManage Enterprise Modular."
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+requirements:
+ - "python >= 3.9.6"
+author: "Shivam Sharma (@ShivamSh3)"
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise
+ or OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+ - This module supports IPv4 and IPv6 addresses.
+'''
+
+EXAMPLES = r'''
+---
+- name: Get message ID details of all alert policies
+ dellemc.openmanage.ome_alert_policies_message_id_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+'''
+
+RETURN = r'''
+---
+msg:
+ description: "Status of the alert policies message ids fetch operation."
+ returned: always
+ type: str
+ sample: "Successfully retrieved alert policies message ids information."
+message_ids:
+ type: list
+ description: Details of the message ids.
+ returned: success
+ sample: [
+ {
+ "Category": "System Health",
+ "DetailedDescription": "The current sensor identified in the message has failed. This condition
+ can cause system performance issues and degradation in the monitoring capability of the system.",
+ "Message": "The ${0} sensor has failed, and the last recorded value by the sensor was ${1} A.",
+ "MessageId": "AMP400",
+ "Prefix": "AMP",
+ "RecommendedAction": "Check the Embedded System Management (ESM) Log for any sensor related faults.
+ If there is a failed sensor, replace the system board. For more information, contact your service provider.",
+ "SequenceNo": 400,
+ "Severity": "Critical",
+ "SubCategory": "Amperage"
+ },
+ {
+ "Category": "System Health",
+ "DetailedDescription": "The current sensor identified in the message has failed. This condition can cause
+ system performance issues and degradation in the monitoring capability of the system.",
+ "Message": "Unable to read the ${0} sensor value.",
+ "MessageId": "AMP401",
+ "Prefix": "AMP",
+ "RecommendedAction": "Check the Embedded System Management (ESM) Log for any sensor related faults. If
+ there is a failed sensor, replace the system board. For more information, contact your service provider.",
+ "SequenceNo": 401,
+ "Severity": "Warning",
+ "SubCategory": "Amperage"
+ }
+]
+error_info:
+ type: dict
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import remove_key, get_all_data_with_pagination
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+ALERT_MESSAGE_URI = "AlertService/AlertMessageDefinitions"
+SUCCESSFUL_MSG = "Successfully retrieved alert policies message ids information."
+EMPTY_MSG = "No alert policies message id information was found."
+
+
+def main():
+ specs = ome_auth_params
+ module = AnsibleModule(
+ argument_spec=specs,
+ supports_check_mode=True
+ )
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ message_id_info = get_all_data_with_pagination(rest_obj, ALERT_MESSAGE_URI)
+ if not message_id_info.get("report_list", []):
+ module.exit_json(msg=EMPTY_MSG, message_ids=[])
+ message_ids = remove_key(message_id_info['report_list'])
+ module.exit_json(msg=SUCCESSFUL_MSG, message_ids=message_ids)
+ except HTTPError as err:
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError, SSLError) as err:
+ module.exit_json(msg=str(err), failed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py
index 66a8b26c0..58572bae0 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -38,8 +38,8 @@ options:
enable_authentication:
description:
- Enable or disable authentication to access the SMTP server.
- - The I(credentials) are mandatory if I(enable_authentication) is C(True).
- - The module will always report change when this is C(True).
+ - The I(credentials) are mandatory if I(enable_authentication) is C(true).
+ - The module will always report change when this is C(true).
type: bool
required: true
credentials:
@@ -59,8 +59,8 @@ options:
requirements:
- "python >= 3.8.6"
notes:
- - The module will always report change when I(enable_authentication) is C(True).
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise
+ - The module will always report change when I(enable_authentication) is C(true).
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise
or OpenManage Enterprise Modular.
- This module support C(check_mode).
author:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py
index 12c212450..a72093752 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -31,14 +31,14 @@ options:
description: The ID of the syslog server.
type: int
choices: [1, 2, 3, 4]
- required: True
+ required: true
enabled:
description: Enable or disable syslog forwarding.
type: bool
destination_address:
description:
- The IP address, FQDN or hostname of the syslog server.
- - This is required if I(enabled) is C(True).
+ - This is required if I(enabled) is C(true).
type: str
port_number:
description: The UDP port number of the syslog server.
@@ -48,7 +48,7 @@ requirements:
author:
- Jagadeesh N V(@jagadeeshnv)
notes:
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise or Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py
index 3c9b26994..60f170f76 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.3.0
+# Copyright (C) 2020-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -54,13 +54,22 @@ options:
email:
description: Email associated with the issuer. This option is applicable for C(generate_csr).
type: str
+ subject_alternative_names:
+ description:
+ - Subject alternative name required for the certificate signing request generation.
+ - Supports up to four comma-separated values, in the order primary, secondary, tertiary, and quaternary.
+ type: str
+ version_added: 8.1.0
upload_file:
type: str
description: Local path of the certificate file to be uploaded. This option is applicable for C(upload).
Once the certificate is uploaded, OpenManage Enterprise cannot be accessed for a few seconds.
requirements:
- - "python >= 3.8.6"
-author: "Felix Stephen (@felixs88)"
+ - "python >= 3.9.6"
+author:
+ - "Felix Stephen (@felixs88)"
+ - "Kritika Bhateja (@Kritika-Bhateja-03)"
+ - "Jennifer John (@Jennifer-John)"
'''
EXAMPLES = r'''
@@ -80,6 +89,22 @@ EXAMPLES = r'''
country: "US"
email: "support@dell.com"
+- name: Generate a certificate signing request with subject alternative names
+ dellemc.openmanage.ome_application_certificate:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "generate_csr"
+ distinguished_name: "hostname.com"
+ subject_alternative_names: "hostname1.chassis.com,hostname2.chassis.com"
+ department_name: "Remote Access Group"
+ business_name: "Dell Inc."
+ locality: "Round Rock"
+ country_state: "Texas"
+ country: "US"
+ email: "support@dell.com"
+
- name: Upload the certificate
dellemc.openmanage.ome_application_certificate:
hostname: "192.168.0.1"
@@ -134,7 +159,6 @@ error_info:
import json
import os
-from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
@@ -151,7 +175,8 @@ def get_resource_parameters(module):
"DepartmentName": module.params["department_name"],
"BusinessName": module.params["business_name"],
"Locality": module.params["locality"], "State": module.params["country_state"],
- "Country": module.params["country"], "Email": module.params["email"]}
+ "Country": module.params["country"], "Email": module.params["email"],
+ "San": get_san(module.params["subject_alternative_names"])}
else:
file_path = module.params["upload_file"]
uri = csr_uri.format("UploadCertificate")
@@ -163,6 +188,13 @@ def get_resource_parameters(module):
return method, uri, payload
+def get_san(subject_alternative_names):
+ if not subject_alternative_names:
+ return subject_alternative_names
+
+ return subject_alternative_names.replace(" ", "")
+
+
def main():
specs = {
"command": {"type": "str", "required": False,
@@ -175,6 +207,7 @@ def main():
"country": {"required": False, "type": "str"},
"email": {"required": False, "type": "str"},
"upload_file": {"required": False, "type": "str"},
+ "subject_alternative_names": {"required": False, "type": "str"}
}
specs.update(ome_auth_params)
module = AnsibleModule(
@@ -202,7 +235,7 @@ def main():
module.fail_json(msg=str(err), error_info=json.load(err))
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
- except (IOError, ValueError, SSLError, TypeError, ConnectionError, SSLValidationError, OSError) as err:
+ except (IOError, ValueError, TypeError, ConnectionError, SSLValidationError, OSError) as err:
module.fail_json(msg=str(err))
except Exception as err:
module.fail_json(msg=str(err))
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py
index 67b00dc8b..65b1ae271 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -158,7 +158,7 @@ EXAMPLES = r'''
common_mac_addresses: "::"
server_initiated_discovery:
device_discovery_approval_policy: Automatic
- set_trap_destination: True
+ set_trap_destination: true
mx7000_onboarding_preferences: all
builtin_appliance_share:
share_options: CIFS
@@ -213,7 +213,7 @@ EXAMPLES = r'''
ca_path: "/path/to/ca_cert.pem"
server_initiated_discovery:
device_discovery_approval_policy: Automatic
- set_trap_destination: True
+ set_trap_destination: true
mx7000_onboarding_preferences: chassis
email_sender_settings: "admin@dell.com"
trap_forwarding_format: Original
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py
index 03eef19ed..ab8814a42 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -428,7 +428,7 @@ import socket
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
IP_CONFIG = "ApplicationService/Network/AddressConfiguration"
@@ -598,7 +598,7 @@ def get_network_config_data(rest_obj, module):
return int_adp, "POST", POST_IP_CONFIG
else:
return pri_adp, "POST", POST_IP_CONFIG
- except HTTPError as err:
+ except HTTPError:
pass
except Exception as err:
raise err
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py
index 3659d8a3d..0ca58de09 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -60,7 +60,7 @@ requirements:
author:
- "Sajna Shetty(@Sajna-Shetty)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module does not support C(check_mode).
'''
@@ -147,7 +147,7 @@ import json
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
PROXY_CONFIG = "ApplicationService/Network/ProxyConfiguration"
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py
index 2dfd13a58..91a0de1d7 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -83,7 +83,7 @@ options:
requirements:
- "python >= 3.8.6"
notes:
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise
or OpenManage Enterprise Modular.
- To configure other network settings such as network address, web server, and so on, refer to the respective
OpenManage Enterprise application network setting modules.
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py
index 381ef3191..baf533c0a 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -59,7 +59,7 @@ requirements:
author:
- "Sajna Shetty(@Sajna-Shetty)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -141,7 +141,7 @@ import json
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
TIME_CONFIG = "ApplicationService/Network/TimeConfiguration"
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py
index adee29dc6..9e6cdffd5 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -39,7 +39,7 @@ requirements:
author:
- "Jagadeesh N V(@jagadeeshnv)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py
index d2b23c256..af869fb16 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -75,7 +75,7 @@ options:
job_wait_timeout:
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
type: int
default: 120
fips_mode_enable:
@@ -96,7 +96,7 @@ author:
requirements:
- "python >= 3.8.6"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
- This module supports C(check_mode).
'''
@@ -148,7 +148,7 @@ EXAMPLES = r'''
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
- fips_mode_enable: yes
+ fips_mode_enable: true
'''
RETURN = r'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_chassis_slots.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_chassis_slots.py
index 6b89fea16..adcc53566 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_chassis_slots.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_chassis_slots.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -44,7 +44,7 @@ options:
slot_name:
type: str
description: Provide name for the slot.
- required: True
+ required: true
slot_options:
type: list
elements: dict
@@ -55,7 +55,7 @@ options:
chassis_service_tag:
type: str
description: Service tag of the chassis.
- required: True
+ required: true
slots:
type: list
elements: dict
@@ -66,17 +66,17 @@ options:
slot_number:
type: int
description: The slot number of the slot to be renamed.
- required: True
+ required: true
slot_name:
type: str
description: Provide name for the slot.
- required: True
+ required: true
requirements:
- "python >= 3.8.6"
notes:
- "This module initiates the refresh inventory task. It may take a minute for new names to be reflected.
If the task exceeds 300 seconds to refresh, the task times out."
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_baseline.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_baseline.py
index 5cac7352d..fa0f2a90a 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_baseline.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_baseline.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.6.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -102,12 +102,14 @@ options:
job_wait_timeout:
description:
- The maximum wait time of I(job_wait) in seconds.The job will only be tracked for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
type: int
default: 10800
requirements:
- "python >= 3.8.6"
-author: "Sajna Shetty(@Sajna-Shetty)"
+author:
+ - "Sajna Shetty(@Sajna-Shetty)"
+ - "Abhishek Sinha(@Abhishek-Dell)"
notes:
- This module supports C(check_mode).
- Ensure that the devices have the required licenses to perform the baseline compliance operations.
@@ -288,12 +290,12 @@ error_info:
import json
import time
-import re
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.compat.version import LooseVersion
COMPLIANCE_BASELINE = "TemplateService/Baselines"
REMEDIATE_BASELINE = "TemplateService/Actions/TemplateService.Remediate"
@@ -744,11 +746,10 @@ def create_remediate_payload(noncomplaint_devices, baseline_info, rest_obj):
"RunLater": False
}
}
- pattern = re.compile(r'(1|2|3)\.(0|1|2|3|4)\.?')
- if pattern.match(ome_version):
- payload["TargetIds"] = noncomplaint_devices
- else:
+ if LooseVersion(ome_version) >= "3.5":
payload["DeviceIds"] = noncomplaint_devices
+ else:
+ payload["TargetIds"] = noncomplaint_devices
return payload
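
The hunk above swaps a version-string regex for LooseVersion so that OME releases 3.5 and later receive "DeviceIds" while older releases keep "TargetIds". A minimal sketch of that branching, assuming only a plain version string is at hand; the helper name pick_target_key is illustrative and not part of the module:

    # Sketch: choose the remediation payload key from the OME version.
    from ansible.module_utils.compat.version import LooseVersion

    def pick_target_key(ome_version):
        # OME 3.5 and later expect "DeviceIds"; earlier releases expect "TargetIds".
        if LooseVersion(ome_version) >= LooseVersion("3.5"):
            return "DeviceIds"
        return "TargetIds"

    print(pick_target_key("3.4.1"))   # TargetIds
    print(pick_target_key("3.10.0"))  # DeviceIds; a plain string compare would get this wrong
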
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py
index d96cd3769..8132ffe9d 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py
@@ -25,18 +25,18 @@ extends_documentation_fragment:
- dellemc.openmanage.oment_auth_options
options:
baseline:
- required: True
+ required: true
description:
- The name of the created baseline.
- A compliance report is generated even when the template is not associated with the baseline.
type: str
device_id:
- required: False
+ required: false
description:
- The ID of the target device which is associated with the I(baseline).
type: int
device_service_tag:
- required: False
+ required: false
description:
- The device service tag of the target device associated with the I(baseline).
- I(device_service_tag) is mutually exclusive with I(device_id).
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py
index 56c1def60..f6a085cd9 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py
@@ -232,7 +232,6 @@ EXAMPLES = """
- fe80::ffff:ffff:ffff:ffff
- ::ffff:192.0.2.0/125
- fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff
-
"""
@@ -281,7 +280,7 @@ from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
-from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError
try:
from netaddr import IPAddress, IPNetwork, IPRange
@@ -511,7 +510,7 @@ def main():
group_id=group_id, changed=True)
else:
current_device_list = get_current_member_of_group(rest_obj, group_id)
- resp = remove_member_from_group(module, rest_obj, group_id, device_id, current_device_list)
+ remove_member_from_group(module, rest_obj, group_id, device_id, current_device_list)
module.exit_json(msg="Successfully removed member(s) from the device group.", changed=True)
except HTTPError as err:
module.fail_json(msg=str(err), error_info=json.load(err))
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_info.py
index 846dd5e82..62430402c 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_info.py
@@ -2,16 +2,13 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
-# Copyright (C) 2019-2022 Dell Inc.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
-# Other trademarks may be trademarks of their respective owners.
#
-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
@@ -63,9 +60,11 @@ options:
requirements:
- "python >= 3.8.6"
-author: "Sajna Shetty(@Sajna-Shetty)"
+author:
+ - "Sajna Shetty (@Sajna-Shetty)"
+ - "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -138,7 +137,6 @@ EXAMPLES = """
device_service_tag:
- MXL1234
- MXL4567
-
"""
RETURN = '''
@@ -199,6 +197,7 @@ device_info:
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import get_all_data_with_pagination
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
@@ -392,9 +391,13 @@ def main():
if device_facts.get("basic_inventory"):
query_param = _get_query_parameters(module.params)
if query_param is not None:
- resp = rest_obj.invoke_request('GET', device_facts["basic_inventory"], query_param=query_param)
- device_facts = resp.json_data
- resp_status.append(resp.status_code)
+ device_report = get_all_data_with_pagination(rest_obj, device_facts["basic_inventory"], query_param)
+ if not device_report.get("report_list", []):
+ module.exit_json(msg="No devices present.", device_info=[])
+ device_facts = {"@odata.context": device_report["resp_obj"].json_data["@odata.context"],
+ "@odata.count": len(device_report["report_list"]),
+ "value": device_report["report_list"]}
+ resp_status.append(device_report["resp_obj"].status_code)
else:
device_report = rest_obj.get_all_report_details(DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"])
device_facts = {"@odata.context": device_report["resp_obj"].json_data["@odata.context"],
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py
index 9b48e33dd..7de50f0fb 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -50,18 +50,19 @@ options:
type: bool
description:
- Enables or disables the chassis power button.
- - If C(False), the chassis cannot be turn on or turn off using the power button.
+ - If C(false), the chassis cannot be turned on or turned off using the power button.
enable_lcd_override_pin:
type: bool
description:
- Enables or disables the LCD override pin.
- - This is required when I(enable_chassis_power_button) is C(False).
+ - This is required when I(enable_chassis_power_button) is C(false).
disabled_button_lcd_override_pin:
- type: int
+ type: str
description:
- The six digit LCD override pin to change the power state of the chassis.
- - This is required when I(enable_lcd_override_pin) is C(True).
- - The module will always report change when I(disabled_button_lcd_override_pin) is C(True).
+ - This is required when I(enable_lcd_override_pin) is C(true).
+ - The module will always report change when I(disabled_button_lcd_override_pin) is C(true).
+ - 'The value must be specified in quotes. ex: "001100".'
quick_sync:
type: dict
description:
@@ -84,7 +85,7 @@ options:
description:
- Inactivity timeout in seconds or minutes.
- The range is 120 to 3600 in seconds, or 2 to 60 in minutes.
- - This option is required when I(enable_inactivity_timeout) is C(True).
+ - This option is required when I(enable_inactivity_timeout) is C(true).
timeout_limit_unit:
type: str
choices: [SECONDS, MINUTES]
@@ -92,7 +93,7 @@ options:
- Inactivity timeout limit unit.
- C(SECONDS) to set I(timeout_limit) in seconds.
- C(MINUTES) to set I(timeout_limit) in minutes.
- - This option is required when I(enable_inactivity_timeout) is C(True).
+ - This option is required when I(enable_inactivity_timeout) is C(true).
enable_read_authentication:
type: bool
description: Enables or disables the option to log in using your user credentials and to read the
@@ -132,10 +133,11 @@ requirements:
- "python >= 3.8.6"
author:
- "Felix Stephen (@felixs88)"
+ - "Shivam Sharma (@ShivamSh3)"
notes:
- Run this module from a system that has direct access to OpenManage Enterprise Modular.
- This module supports C(check_mode).
- - The module will always report change when I(enable_chassis_power_button) is C(True).
+ - The module will always report change when I(enable_chassis_power_button) is C(true).
"""
EXAMPLES = """
@@ -152,7 +154,7 @@ EXAMPLES = """
chassis_power_button:
enable_chassis_power_button: false
enable_lcd_override_pin: true
- disabled_button_lcd_override_pin: 123456
+ disabled_button_lcd_override_pin: "123456"
- name: Configure Quick sync and LCD settings of the chassis using device service tag.
dellemc.openmanage.ome_device_local_access_configuration:
@@ -184,7 +186,7 @@ EXAMPLES = """
chassis_power_button:
enable_chassis_power_button: false
enable_lcd_override_pin: true
- disabled_button_lcd_override_pin: 123456
+ disabled_button_lcd_override_pin: "123456"
quick_sync:
quick_sync_access: READ_WRITE
enable_read_authentication: true
@@ -417,7 +419,7 @@ def main():
chassis_power = {
"enable_chassis_power_button": {"type": "bool", "required": True},
"enable_lcd_override_pin": {"type": "bool", "required": False},
- "disabled_button_lcd_override_pin": {"type": "int", "required": False, "no_log": True}}
+ "disabled_button_lcd_override_pin": {"type": "str", "required": False, "no_log": True}}
quick_sync_options = {
"quick_sync_access": {"type": "str", "required": False, "choices": ["DISABLED", "READ_ONLY", "READ_WRITE"]},
"enable_inactivity_timeout": {"type": "bool", "required": False},
@@ -470,7 +472,7 @@ def main():
resp_data["QuickSync"]["TimeoutLimitUnit"] = "MINUTES"
module.exit_json(msg=SUCCESS_MSG, local_access_settings=resp_data, changed=True)
except HTTPError as err:
- module.fail_json(msg=str(err), error_info=json.load(err))
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
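
The int-to-str change above for disabled_button_lcd_override_pin exists because a six-digit PIN can begin with zeros, and an integer cannot carry them; only a quoted string keeps the exact digits the chassis expects. A small standalone illustration (the variable names are only for the example):

    # Why the LCD override PIN must be a string: integers drop leading zeros.
    pin_as_int = 1100        # a user intending "001100" ends up with four digits
    pin_as_str = "001100"    # the quoted value keeps all six digits

    print(len(str(pin_as_int)))                    # 4
    print(len(pin_as_str))                         # 6
    print(str(pin_as_int).zfill(6) == pin_as_str)  # True only after re-padding
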
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py
index 96a61a29b..9c73b7c46 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -59,7 +59,7 @@ requirements:
author:
- "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py
index e895472ea..0d4b0a483 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -62,7 +62,7 @@ options:
description:
- "Enable or disable the automatic request to obtain an IPv4 address from the IPv4 Dynamic Host Configuration
Protocol (DHCP) server."
- - "C(NOTE) If this option is C(True), the values provided for I(static_ip_address), I(static_subnet_mask),
+ - "C(NOTE) If this option is C(true), the values provided for I(static_ip_address), I(static_subnet_mask),
and I(static_gateway) are not applied for these fields. However, the module may report changes."
type: bool
static_ip_address:
@@ -84,7 +84,7 @@ options:
description:
- This option allows to automatically request and obtain IPv4 address for the DNS Server from the DHCP server.
- This option is applicable when I(enable_dhcp) is true.
- - "C(NOTE) If this option is C(True), the values provided for I(static_preferred_dns_server) and
+ - "C(NOTE) If this option is C(true), the values provided for I(static_preferred_dns_server) and
I(static_alternate_dns_server) are not applied for these fields. However, the module may report changes."
type: bool
static_preferred_dns_server:
@@ -114,7 +114,7 @@ options:
advertisements(RA)"
- "If I(enable_auto_configuration) is C(true), OpenManage Enterprise Modular retrieves IP configuration
(IPv6 address, prefix, and gateway address) from a DHCPv6 server on the existing network."
- - "C(NOTE) If this option is C(True), the values provided for I(static_ip_address), I(static_prefix_length),
+ - "C(NOTE) If this option is C(true), the values provided for I(static_ip_address), I(static_prefix_length),
and I(static_gateway) are not applied for these fields. However, the module may report changes."
type: bool
static_ip_address:
@@ -136,7 +136,7 @@ options:
description:
- This option allows to automatically request and obtain a IPv6 address for the DNS server from the DHCP server.
- This option is applicable when I(enable_auto_configuration) is true
- - "C(NOTE) If this option is C(True), the values provided for I(static_preferred_dns_server) and I(static_alternate_dns_server)
+ - "C(NOTE) If this option is C(true), the values provided for I(static_preferred_dns_server) and I(static_alternate_dns_server)
are not applied for these fields. However, the module may report changes."
type: bool
static_preferred_dns_server:
@@ -231,7 +231,7 @@ options:
requirements:
- "python >= 3.8.6"
notes:
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
"""
@@ -267,7 +267,7 @@ EXAMPLES = """
use_dhcp_for_dns_domain_name: false
dns_name: "MX-SVCTAG"
dns_domain_name: "dnslocaldomain"
- auto_negotiation: no
+ auto_negotiation: false
network_speed: 100_MB
- name: Network settings for server
@@ -325,7 +325,7 @@ EXAMPLES = """
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
- device_id : 12345
+ device_id: 12345
management_vlan:
enable_vlan: true
vlan_id: 2345
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py
index 81475d48b..6d1518b34 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -83,7 +83,7 @@ requirements:
author:
- "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py
index ec99e693a..f0587791e 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -81,7 +81,7 @@ requirements:
author:
- "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py
index 183b7f67e..f12cf7078 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.7.0
+# Copyright (C) 2022-2024 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -36,7 +36,7 @@ options:
- I(device_service_tag) is mutually exclusive with I(device_id).
setting_type:
type: str
- required: True
+ required: true
choices: [ServerQuickDeploy, IOMQuickDeploy]
description:
- The type of the Quick Deploy settings to be applied.
@@ -45,16 +45,16 @@ options:
job_wait:
type: bool
description: Determines whether to wait for the job completion or not.
- default: True
+ default: true
job_wait_timeout:
type: int
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
default: 120
quick_deploy_options:
type: dict
- required: True
+ required: true
description: The Quick Deploy settings for server and IOM quick deploy.
suboptions:
password:
@@ -70,7 +70,7 @@ options:
choices: [Static, DHCP]
description:
- IPv4 network type.
- - I(ipv4_network_type) is required if I(ipv4_enabled) is C(True).
+ - I(ipv4_network_type) is required if I(ipv4_enabled) is C(true).
- C(Static) to configure the static IP settings.
- C(DHCP) to configure the Dynamic IP settings.
ipv4_subnet_mask:
@@ -91,7 +91,7 @@ options:
choices: [Static, DHCP]
description:
- IPv6 network type.
- - I(ipv6_network_type) is required if I(ipv6_enabled) is C(True).
+ - I(ipv6_network_type) is required if I(ipv6_enabled) is C(true).
- C(Static) to configure the static IP settings.
- C(DHCP) to configure the Dynamic IP settings.
ipv6_prefix_length:
@@ -111,7 +111,7 @@ options:
suboptions:
slot_id:
type: int
- required: True
+ required: true
description: The ID of the slot.
slot_ipv4_address:
type: str
@@ -123,9 +123,10 @@ options:
type: int
description: The ID of the VLAN.
requirements:
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
+ - "Shivam Sharma (@ShivamSh3)"
notes:
- Run this module from a system that has direct access to OpenManage Enterprise Modular.
- This module supports C(check_mode).
@@ -147,11 +148,11 @@ EXAMPLES = """
ca_path: "/path/to/ca_cert.pem"
quick_deploy_options:
password: "password"
- ipv4_enabled: True
+ ipv4_enabled: true
ipv4_network_type: Static
ipv4_subnet_mask: 255.255.255.0
ipv4_gateway: 192.168.0.1
- ipv6_enabled: True
+ ipv6_enabled: true
ipv6_network_type: Static
ipv6_prefix_length: 1
ipv6_gateway: "::"
@@ -175,11 +176,11 @@ EXAMPLES = """
ca_path: "/path/to/ca_cert.pem"
quick_deploy_options:
password: "password"
- ipv4_enabled: True
+ ipv4_enabled: true
ipv4_network_type: Static
ipv4_subnet_mask: 255.255.255.0
ipv4_gateway: 192.168.0.1
- ipv6_enabled: True
+ ipv6_enabled: true
ipv6_network_type: Static
ipv6_prefix_length: 1
ipv6_gateway: "::"
@@ -395,7 +396,6 @@ def ip_address_field(module, field, deploy_options, slot=False):
valid = validate_ip_address(module_params.get(val[0]), val[1])
if valid is False:
module.fail_json(msg=IP_FAIL_MSG.format(field_value, val[0]))
- return
def check_domain_service(module, rest_obj):
@@ -405,7 +405,6 @@ def check_domain_service(module, rest_obj):
err_message = json.load(err)
if err_message["error"]["@Message.ExtendedInfo"][0]["MessageId"] == "CGEN1006":
module.fail_json(msg=DOMAIN_FAIL_MSG)
- return
def get_ip_from_host(hostname):
@@ -446,33 +445,9 @@ def check_mode_validation(module, deploy_data):
ipv6_enabled_deploy = deploy_data["ProtocolTypeV6"]
ipv4_nt_deploy = deploy_data.get("NetworkTypeV4")
ipv6_nt_deploy = deploy_data.get("NetworkTypeV6")
- if ipv4_enabled is not None and ipv4_enabled is True or \
- ipv4_enabled_deploy is not None and ipv4_enabled_deploy is True:
- req_data["ProtocolTypeV4"] = None
- if ipv4_enabled is not None:
- req_data["ProtocolTypeV4"] = str(ipv4_enabled).lower()
- ipv4_network_type = deploy_options.get("ipv4_network_type")
- req_data["NetworkTypeV4"] = ipv4_network_type
- if ipv4_network_type == "Static" or ipv4_nt_deploy is not None and ipv4_nt_deploy == "Static":
- req_data["IpV4SubnetMask"] = deploy_options.get("ipv4_subnet_mask")
- req_data["IpV4Gateway"] = deploy_options.get("ipv4_gateway")
- elif ipv4_enabled is not None and ipv4_enabled is False:
- req_data["ProtocolTypeV4"] = str(ipv4_enabled).lower()
ipv6_enabled = deploy_options.get("ipv6_enabled")
- if ipv6_enabled is not None and ipv6_enabled is True or \
- ipv6_enabled_deploy is not None and ipv6_enabled_deploy is True:
- req_data["ProtocolTypeV6"] = None
- if ipv6_enabled is not None:
- req_data["ProtocolTypeV6"] = str(ipv6_enabled).lower()
- ipv6_network_type = deploy_options.get("ipv6_network_type")
- req_data["NetworkTypeV6"] = ipv6_network_type
- if ipv6_network_type == "Static" or ipv6_nt_deploy is not None and ipv6_nt_deploy == "Static":
- req_data["PrefixLength"] = deploy_options.get("ipv6_prefix_length")
- if deploy_options.get("ipv6_prefix_length") is not None:
- req_data["PrefixLength"] = str(deploy_options.get("ipv6_prefix_length"))
- req_data["IpV6Gateway"] = deploy_options.get("ipv6_gateway")
- elif ipv6_enabled is not None and ipv6_enabled is False:
- req_data["ProtocolTypeV6"] = str(ipv6_enabled).lower()
+ update_ipv4_data(req_data, ipv4_enabled, ipv4_enabled_deploy, ipv4_nt_deploy, deploy_options)
+ update_ipv6_data(req_data, ipv6_enabled, ipv6_enabled_deploy, ipv6_nt_deploy, deploy_options)
resp_data = {
"ProtocolTypeV4": str(ipv4_enabled_deploy).lower(), "NetworkTypeV4": deploy_data.get("NetworkTypeV4"),
"IpV4SubnetMask": deploy_data.get("IpV4SubnetMask"), "IpV4Gateway": deploy_data.get("IpV4Gateway"),
@@ -480,7 +455,9 @@ def check_mode_validation(module, deploy_data):
"PrefixLength": deploy_data.get("PrefixLength"), "IpV6Gateway": deploy_data.get("IpV6Gateway")}
resp_filter_data = dict([(k, v) for k, v in resp_data.items() if v is not None])
req_data_filter = dict([(k, v) for k, v in req_data.items() if v is not None])
- diff_changes = [bool(set(resp_filter_data.items()) ^ set(req_data_filter.items()))]
+ copy_resp_filter_data = copy.deepcopy(resp_filter_data)
+ copy_resp_filter_data.update(req_data_filter)
+ diff_changes = [bool(set(resp_filter_data.items()) ^ set(copy_resp_filter_data.items()))]
req_slot_payload, invalid_slot = [], []
slots = deploy_options.get("slots")
if slots is not None:
@@ -492,11 +469,16 @@ def check_mode_validation(module, deploy_data):
"SlotIPV6Address": each.get("slot_ipv6_address"), "VlanId": each.get("vlan_id")}
if each.get("vlan_id") is not None:
req_slot_1.update({"VlanId": str(each.get("vlan_id"))})
+ else:
+ req_slot_1.update({"VlanId": ""})
req_filter_slot = dict([(k, v) for k, v in req_slot_1.items() if v is not None])
exist_slot_1 = {"SlotId": exist_filter_slot[0]["SlotId"],
"SlotIPV4Address": exist_filter_slot[0]["SlotIPV4Address"],
- "SlotIPV6Address": exist_filter_slot[0]["SlotIPV6Address"],
- "VlanId": exist_filter_slot[0]["VlanId"]}
+ "SlotIPV6Address": exist_filter_slot[0]["SlotIPV6Address"]}
+ if "VlanId" in exist_filter_slot[0]:
+ exist_slot_1.update({"VlanId": exist_filter_slot[0]["VlanId"]})
+ else:
+ exist_slot_1.update({"VlanId": ""})
exist_filter_slot = dict([(k, v) for k, v in exist_slot_1.items() if v is not None])
cp_exist_filter_slot = copy.deepcopy(exist_filter_slot)
cp_exist_filter_slot.update(req_filter_slot)
@@ -513,9 +495,48 @@ def check_mode_validation(module, deploy_data):
module.exit_json(msg=NO_CHANGES_FOUND, quick_deploy_settings=deploy_data)
req_payload.update(resp_filter_data)
req_payload.update(req_data_filter)
+ update_prefix_length(req_payload)
return req_payload, req_slot_payload
+def update_ipv4_data(req_data, ipv4_enabled, ipv4_enabled_deploy, ipv4_nt_deploy, deploy_options):
+ if ipv4_enabled is not None and ipv4_enabled is True or \
+ ipv4_enabled_deploy is not None and ipv4_enabled_deploy is True:
+ req_data["ProtocolTypeV4"] = None
+ if ipv4_enabled is not None:
+ req_data["ProtocolTypeV4"] = str(ipv4_enabled).lower()
+ ipv4_network_type = deploy_options.get("ipv4_network_type")
+ req_data["NetworkTypeV4"] = ipv4_network_type
+ if ipv4_network_type == "Static" or ipv4_nt_deploy is not None and ipv4_nt_deploy == "Static":
+ req_data["IpV4SubnetMask"] = deploy_options.get("ipv4_subnet_mask")
+ req_data["IpV4Gateway"] = deploy_options.get("ipv4_gateway")
+ elif ipv4_enabled is not None and ipv4_enabled is False:
+ req_data["ProtocolTypeV4"] = str(ipv4_enabled).lower()
+
+
+def update_ipv6_data(req_data, ipv6_enabled, ipv6_enabled_deploy, ipv6_nt_deploy, deploy_options):
+ if ipv6_enabled is not None and ipv6_enabled is True or \
+ ipv6_enabled_deploy is not None and ipv6_enabled_deploy is True:
+ req_data["ProtocolTypeV6"] = None
+ if ipv6_enabled is not None:
+ req_data["ProtocolTypeV6"] = str(ipv6_enabled).lower()
+ ipv6_network_type = deploy_options.get("ipv6_network_type")
+ req_data["NetworkTypeV6"] = ipv6_network_type
+ if ipv6_network_type == "Static" or ipv6_nt_deploy is not None and ipv6_nt_deploy == "Static":
+ req_data["PrefixLength"] = deploy_options.get("ipv6_prefix_length")
+ if deploy_options.get("ipv6_prefix_length") is not None:
+ req_data["PrefixLength"] = str(deploy_options.get("ipv6_prefix_length"))
+ req_data["IpV6Gateway"] = deploy_options.get("ipv6_gateway")
+ elif ipv6_enabled is not None and ipv6_enabled is False:
+ req_data["ProtocolTypeV6"] = str(ipv6_enabled).lower()
+
+
+def update_prefix_length(req_payload):
+ prefix_length = req_payload.get("PrefixLength")
+ if prefix_length == '0':
+ req_payload["PrefixLength"] = ""
+
+
def job_payload_submission(rest_obj, payload, slot_payload, settings_type, device_id, resp_data):
job_params = []
job_params.append({"Key": "protocolTypeV4", "Value": payload["ProtocolTypeV4"]})
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py
index 954395280..876e5b235 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py
@@ -63,7 +63,7 @@ options:
job_wait_timeout:
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
type: int
default: 1200
job_schedule:
@@ -233,8 +233,7 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import \
- get_rest_items, strip_substr_dict, job_tracking, apply_diff_key
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict, job_tracking
from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import CHANGES_MSG, NO_CHANGES_MSG
DEVICE_URI = "DeviceService/Devices"
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_diagnostics.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_diagnostics.py
index 71b0e0960..b16604c3c 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_diagnostics.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_diagnostics.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
-# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -60,7 +60,7 @@ options:
- Select this option to mask the personal identification information such as IPAddress,
DNS, alert destination, email, gateway, inet6, MacAddress, netmask etc.
- This option is applicable for C(application) of I(log_type).
- default: False
+ default: false
log_selectors:
type: list
description:
@@ -75,17 +75,17 @@ options:
elements: str
share_address:
type: str
- required: True
+ required: true
description: Network share IP address.
share_name:
type: str
- required: True
+ required: true
description:
- Network share path.
- Filename is auto generated and should not be provided as part of I(share_name).
share_type:
type: str
- required: True
+ required: true
description: Network share type
choices: [NFS, CIFS]
share_user:
@@ -108,7 +108,7 @@ options:
description:
- Whether to wait for the Job completion or not.
- The maximum wait time is I(job_wait_timeout).
- default: True
+ default: true
job_wait_timeout:
type: int
description:
@@ -120,13 +120,13 @@ options:
description:
- Test the availability of the network share location.
- I(job_wait) and I(job_wait_timeout) options are not applicable for I(test_connection).
- default: False
+ default: false
lead_chassis_only:
type: bool
description:
- Extract the logs from Lead chassis only.
- I(lead_chassis_only) is only applicable when I(log_type) is C(application) on OpenManage Enterprise Modular.
- default: False
+ default: false
requirements:
- "python >= 3.8.6"
author:
@@ -505,7 +505,7 @@ def main():
resp = response.json_data
if resp:
resp = rest_obj.strip_substr_dict(resp)
- module.exit_json(msg=message, job_status=resp)
+ module.exit_json(msg=message, job_status=resp, changed=True)
except HTTPError as err:
module.fail_json(msg=str(err), error_info=json.load(err))
except URLError as err:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_discovery.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_discovery.py
index a4fde99f9..f50d8f25e 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_discovery.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_discovery.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -80,16 +80,16 @@ options:
job_wait_timeout:
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
type: int
default: 10800
ignore_partial_failure:
description:
- "Provides the option to ignore partial failures. Partial failures occur when there is a combination of both
discovered and undiscovered IPs."
- - If C(False), then the partial failure is not ignored, and the module will error out.
- - If C(True), then the partial failure is ignored.
- - This option is only applicable if I(job_wait) is C(True).
+ - If C(false), then the partial failure is not ignored, and the module will error out.
+ - If C(true), then the partial failure is ignored.
+ - This option is only applicable if I(job_wait) is C(true).
type: bool
default: false
discovery_config_targets:
@@ -370,8 +370,9 @@ requirements:
author:
- "Jagadeesh N V (@jagadeeshnv)"
- "Sajna Shetty (@Sajna-Shetty)"
+ - "Abhishek Sinha (@Abhishek-Dell)"
notes:
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module does not support C(check_mode).
- If I(state) is C(present), then Idempotency is not supported.
'''
@@ -496,9 +497,9 @@ EXAMPLES = r'''
password: ipmi_pwd
schedule: RunLater
cron: "0 0 9 ? * MON,WED,FRI *"
- ignore_partial_failure: True
- trap_destination: True
- community_string: True
+ ignore_partial_failure: true
+ trap_destination: true
+ community_string: true
email_recipient: test_email@company.com
- name: Discover servers with ca check enabled
@@ -516,7 +517,7 @@ EXAMPLES = r'''
wsman:
username: user
password: password
- ca_check: True
+ ca_check: true
certificate_data: "{{ lookup('ansible.builtin.file', '/path/to/certificate_data_file') }}"
- name: Discover chassis with ca check enabled data
@@ -534,7 +535,7 @@ EXAMPLES = r'''
redfish:
username: user
password: password
- ca_check: True
+ ca_check: true
certificate_data: "-----BEGIN CERTIFICATE-----\r\n
ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n
ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n
@@ -598,6 +599,27 @@ discovery_ids:
returned: when discoveries with duplicate name exist for I(state) is C(present)
type: list
sample: [1234, 5678]
+job_detailed_status:
+ description: Detailed last execution history of a job.
+ returned: All time.
+ type: list
+ sample: [
+ {
+ "ElapsedTime": "00:00:00",
+ "EndTime": null,
+ "ExecutionHistoryId": 564873,
+ "Id": 656893,
+ "IdBaseEntity": 0,
+ "JobStatus": {
+ "Id": 2050,
+ "Name": "Running"
+ },
+ "Key": "192.96.24.1",
+ "Progress": "0",
+ "StartTime": "2023-07-04 06:23:54.008",
+ "Value": "Running\nDiscovery of target 192.96.24.1 started.\nDiscovery target resolved to IP 192.96.24.1 ."
+ }
+ ]
error_info:
description: Details of the HTTP Error.
returned: on HTTP error
@@ -622,10 +644,10 @@ error_info:
import json
import time
-from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
@@ -650,6 +672,11 @@ DISCOVERY_PARTIAL = "Some IPs are not discovered."
ATLEAST_ONE_PROTOCOL = "Protocol not applicable for given device types."
INVALID_DISCOVERY_ID = "Invalid discovery ID provided."
SETTLING_TIME = 5
+JOB_STATUS_MAP = {
+ 2020: "Scheduled", 2030: "Queued", 2040: "Starting", 2050: "Running", 2060: "completed successfully",
+ 2070: "Failed", 2090: "completed with errors", 2080: "New", 2100: "Aborted", 2101: "Paused", 2102: "Stopped",
+ 2103: "Canceled"
+}
def check_existing_discovery(module, rest_obj):
@@ -720,39 +747,37 @@ def get_schedule(module):
return schedule_payload
-def get_execution_details(module, rest_obj, job_id):
+def get_execution_details(rest_obj, job_id):
try:
+ ips = {"Completed": [], "Failed": []}
+ job_detail_status = []
resp = rest_obj.invoke_request('GET', JOB_EXEC_HISTORY.format(job_id=job_id))
ex_hist = resp.json_data.get('value')
# Sorting based on startTime and to get latest execution instance.
tmp_dict = dict((x["StartTime"], x["Id"]) for x in ex_hist)
sorted_dates = sorted(tmp_dict.keys())
ex_url = JOB_EXEC_HISTORY.format(job_id=job_id) + "({0})/ExecutionHistoryDetails".format(tmp_dict[sorted_dates[-1]])
- ips = {"Completed": [], "Failed": []}
all_exec = rest_obj.get_all_items_with_pagination(ex_url)
for jb_ip in all_exec.get('value'):
+ jb_ip = strip_substr_dict(jb_ip)
+ jb_ip.get('JobStatus', {}).pop('@odata.type', None)
+ job_detail_status.append(jb_ip)
jobstatus = jb_ip.get('JobStatus', {}).get('Name', 'Unknown')
jlist = ips.get(jobstatus, [])
jlist.append(jb_ip.get('Key'))
ips[jobstatus] = jlist
except Exception:
- ips = {"Completed": [], "Failed": []}
- return ips
+ pass
+ return ips, job_detail_status
def discovery_job_tracking(rest_obj, job_id, job_wait_sec):
- job_status_map = {
- 2020: "Scheduled", 2030: "Queued", 2040: "Starting", 2050: "Running", 2060: "completed successfully",
- 2070: "Failed", 2090: "completed with errors", 2080: "New", 2100: "Aborted", 2101: "Paused", 2102: "Stopped",
- 2103: "Canceled"
- }
sleep_interval = 30
max_retries = job_wait_sec // sleep_interval
failed_job_status = [2070, 2100, 2101, 2102, 2103]
success_job_status = [2060, 2020, 2090]
job_url = (DISCOVERY_JOBS_URI + "({job_id})").format(job_id=job_id)
loop_ctr = 0
- job_failed = True
time.sleep(SETTLING_TIME)
while loop_ctr < max_retries:
loop_ctr += 1
@@ -761,17 +786,15 @@ def discovery_job_tracking(rest_obj, job_id, job_wait_sec):
job_dict = job_resp.json_data
job_status = job_dict['JobStatusId']
if job_status in success_job_status:
- job_failed = False
- return job_failed, JOB_TRACK_SUCCESS.format(job_status_map[job_status])
+ return JOB_TRACK_SUCCESS.format(JOB_STATUS_MAP[job_status])
elif job_status in failed_job_status:
- job_failed = True
- return job_failed, JOB_TRACK_FAIL.format(job_status_map[job_status])
+ return JOB_TRACK_FAIL.format(JOB_STATUS_MAP[job_status])
time.sleep(sleep_interval)
except HTTPError:
- return job_failed, JOB_TRACK_UNABLE.format(job_id)
+ return JOB_TRACK_UNABLE.format(job_id)
except Exception as err:
- return job_failed, str(err)
- return job_failed, JOB_TRACK_INCOMPLETE.format(job_id, max_retries)
+ return str(err)
+ return JOB_TRACK_INCOMPLETE.format(job_id, max_retries)
def get_job_data(discovery_json, rest_obj):
@@ -879,19 +902,22 @@ def exit_discovery(module, rest_obj, job_id):
msg = DISCOVERY_SCHEDULED
time.sleep(SETTLING_TIME)
djob = get_discovery_job(rest_obj, job_id)
+ detailed_job = []
if module.params.get("job_wait") and module.params.get('schedule') == 'RunNow':
- job_failed, job_message = discovery_job_tracking(rest_obj, job_id,
- job_wait_sec=module.params["job_wait_timeout"])
- if job_failed is True:
- djob.update({"Completed": [], "Failed": []})
- module.fail_json(msg=job_message, discovery_status=djob)
+ job_message = discovery_job_tracking(rest_obj, job_id, job_wait_sec=module.params["job_wait_timeout"])
msg = job_message
- ip_details = get_execution_details(module, rest_obj, job_id)
+ ip_details, detailed_job = get_execution_details(rest_obj, job_id)
djob = get_discovery_job(rest_obj, job_id)
djob.update(ip_details)
- if ip_details.get("Failed") and module.params.get("ignore_partial_failure") is False:
- module.fail_json(msg=DISCOVERY_PARTIAL, discovery_status=djob)
- module.exit_json(msg=msg, discovery_status=djob, changed=True)
+ if djob["JobStatusId"] == 2090 and not module.params.get("ignore_partial_failure"):
+ module.fail_json(msg=DISCOVERY_PARTIAL, discovery_status=djob, job_detailed_status=detailed_job)
+ if djob["JobStatusId"] == 2090 and module.params.get("ignore_partial_failure"):
+ module.exit_json(msg=JOB_TRACK_SUCCESS.format(JOB_STATUS_MAP[djob["JobStatusId"]]), discovery_status=djob,
+ job_detailed_status=detailed_job, changed=True)
+ if ip_details.get("Failed"):
+ module.fail_json(msg=JOB_TRACK_FAIL.format(JOB_STATUS_MAP[djob["JobStatusId"]]), discovery_status=djob,
+ job_detailed_status=detailed_job)
+ module.exit_json(msg=msg, discovery_status=djob, job_detailed_status=detailed_job, changed=True)
def create_discovery(module, rest_obj):
@@ -997,27 +1023,27 @@ def main():
"timeout": {"type": 'int', "default": 60},
"kgkey": {"type": 'str', "no_log": True}
}
- DiscoveryConfigModel = {"device_types": {"required": True, 'type': 'list', "elements": 'str'},
- "network_address_detail": {"required": True, "type": 'list', "elements": 'str'},
- "wsman": {"type": 'dict', "options": http_creds,
- "required_if": [['ca_check', True, ('certificate_data',)]]},
- "storage": {"type": 'dict', "options": http_creds,
- "required_if": [['ca_check', True, ('certificate_data',)]]},
- "redfish": {"type": 'dict', "options": http_creds,
+ discovery_config_model = {"device_types": {"required": True, 'type': 'list', "elements": 'str'},
+ "network_address_detail": {"required": True, "type": 'list', "elements": 'str'},
+ "wsman": {"type": 'dict', "options": http_creds,
"required_if": [['ca_check', True, ('certificate_data',)]]},
- "vmware": {"type": 'dict', "options": http_creds,
- "required_if": [['ca_check', True, ('certificate_data',)]]},
- "snmp": {"type": 'dict', "options": snmp_creds},
- "ssh": {"type": 'dict', "options": ssh_creds},
- "ipmi": {"type": 'dict', "options": ipmi_creds},
- }
+ "storage": {"type": 'dict', "options": http_creds,
+ "required_if": [['ca_check', True, ('certificate_data',)]]},
+ "redfish": {"type": 'dict', "options": http_creds,
+ "required_if": [['ca_check', True, ('certificate_data',)]]},
+ "vmware": {"type": 'dict', "options": http_creds,
+ "required_if": [['ca_check', True, ('certificate_data',)]]},
+ "snmp": {"type": 'dict', "options": snmp_creds},
+ "ssh": {"type": 'dict', "options": ssh_creds},
+ "ipmi": {"type": 'dict', "options": ipmi_creds},
+ }
specs = {
"discovery_job_name": {"type": 'str'},
"discovery_id": {"type": 'int'},
"state": {"default": "present", "choices": ['present', 'absent']},
"new_name": {"type": 'str'},
"discovery_config_targets":
- {"type": 'list', "elements": 'dict', "options": DiscoveryConfigModel,
+ {"type": 'list', "elements": 'dict', "options": discovery_config_model,
"required_one_of": [
('wsman', 'storage', 'redfish', 'vmware', 'snmp', 'ssh', 'ipmi')
]},
@@ -1059,7 +1085,7 @@ def main():
module.fail_json(msg=str(err), error_info=json.load(err))
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
- except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
+ except (IOError, ValueError, TypeError, ConnectionError, SSLValidationError, OSError) as err:
module.fail_json(msg=str(err))
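
The tracking changes above replace the per-call status dictionary and job_failed flag with a module-level JOB_STATUS_MAP and a single message return, and the execution history is now surfaced as job_detailed_status. A minimal polling loop in the same spirit, assuming a get_status(job_id) callable that returns the numeric JobStatusId (a stand-in for the REST call, not the module's API):

    import time

    JOB_STATUS_MAP = {2020: "Scheduled", 2050: "Running", 2060: "completed successfully",
                      2070: "Failed", 2090: "completed with errors", 2100: "Aborted"}
    SUCCESS_STATUS = {2020, 2060, 2090}
    FAILED_STATUS = {2070, 2100}

    def track_job(get_status, job_id, wait_sec, sleep_interval=30):
        # Poll until the job reaches a terminal state or the wait budget is spent.
        for _ in range(max(wait_sec // sleep_interval, 1)):
            status = get_status(job_id)
            if status in SUCCESS_STATUS:
                return "Job completed: {0}.".format(JOB_STATUS_MAP[status])
            if status in FAILED_STATUS:
                return "Job failed: {0}.".format(JOB_STATUS_MAP[status])
            time.sleep(sleep_interval)
        return "Job {0} is incomplete after the configured wait time.".format(job_id)
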
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py
index 7b74c306e..bd15dccc6 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.2.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -16,10 +16,10 @@ __metaclass__ = type
DOCUMENTATION = r"""
---
module: ome_domain_user_groups
-short_description: Create, modify, or delete an Active Directory user group on
+short_description: Create, modify, or delete an Active Directory/LDAP user group on
OpenManage Enterprise and OpenManage Enterprise Modular
version_added: "4.0.0"
-description: This module allows to create, modify, or delete an Active Directory user group on
+description: This module allows to create, modify, or delete an Active Directory/LDAP user group on
OpenManage Enterprise and OpenManage Enterprise Modular.
extends_documentation_fragment:
- dellemc.openmanage.ome_auth_options
@@ -27,21 +27,21 @@ options:
state:
type: str
description:
- - C(present) imports or modifies the Active Directory user group.
- - C(absent) deletes an existing Active Directory user group.
+ - C(present) imports or modifies the Active Directory/LDAP user group.
+ - C(absent) deletes an existing Active Directory/LDAP user group.
choices: [present, absent]
default: present
group_name:
type: str
- required: True
+ required: true
description:
- - The desired Active Directory user group name to be imported or removed.
+ - The desired Active Directory/LDAP user group name to be imported or removed.
- "Examples for user group name: Administrator or Account Operators or Access Control Assistance Operator."
- I(group_name) value is case insensitive.
role:
type: str
description:
- - The desired roles and privilege for the imported Active Directory user group.
+ - The desired roles and privilege for the imported Active Directory/LDAP user group.
- "OpenManage Enterprise Modular Roles: CHASSIS ADMINISTRATOR, COMPUTE MANAGER, STORAGE MANAGER,
FABRIC MANAGER, VIEWER."
- "OpenManage Enterprise Roles: ADMINISTRATOR, DEVICE MANAGER, VIEWER."
@@ -49,26 +49,33 @@ options:
directory_name:
type: str
description:
- - The directory name set while adding the Active Directory.
+ - The directory name set while adding the Active Directory/LDAP.
- I(directory_name) is mutually exclusive with I(directory_id).
+ directory_type:
+ type: str
+ description:
+ - Type of the account.
+ choices: ['AD', 'LDAP']
+ default: 'AD'
directory_id:
type: int
description:
- - The ID of the Active Directory.
+ - The ID of the Active Directory/LDAP.
- I(directory_id) is mutually exclusive with I(directory_name).
domain_username:
type: str
description:
- - Active directory domain username.
+ - Active Directory/LDAP domain username.
- "Example: username@domain or domain\\username."
domain_password:
type: str
description:
- - Active directory domain password.
+ - Active Directory/LDAP domain password.
requirements:
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
+ - "Abhishek Sinha (@Abhishek-Dell)"
notes:
- This module supports C(check_mode) and idempotency.
- Run this module from a system that has direct access to OpenManage Enterprise
@@ -108,15 +115,38 @@ EXAMPLES = r"""
ca_path: "/path/to/ca_cert.pem"
state: absent
group_name: administrators
+
+- name: Import LDAP directory group.
+ dellemc.openmanage.ome_domain_user_groups:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ directory_type: LDAP
+ state: present
+ group_name: account operators
+ directory_name: directory_name
+ role: administrator
+ domain_username: username@domain
+ domain_password: domain_password
+
+- name: Remove LDAP directory group.
+ dellemc.openmanage.ome_domain_user_groups:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: absent
+ group_name: account operators
"""
RETURN = r"""
---
msg:
type: str
- description: Overall status of the Active Directory user group operation.
+ description: Overall status of the Active Directory/LDAP user group operation.
returned: always
- sample: Successfully imported the active directory user group.
+ sample: Successfully imported the Active Directory/LDAP user group.
domain_user_status:
description: Details of the domain user operation, when I(state) is C(present).
returned: When I(state) is C(present).
@@ -171,8 +201,9 @@ from ansible.module_utils.urls import ConnectionError, SSLValidationError
ROLE_URI = "AccountService/Roles"
ACCOUNT_URI = "AccountService/Accounts"
GET_AD_ACC = "AccountService/ExternalAccountProvider/ADAccountProvider"
+GET_LDAP_ACC = "AccountService/ExternalAccountProvider/LDAPAccountProvider"
IMPORT_ACC_PRV = "AccountService/Actions/AccountService.ImportExternalAccountProvider"
-SEARCH_AD = "AccountService/ExternalAccountProvider/Actions/ExternalAccountProvider.SearchGroups"
+SEARCH_GROUPS = "AccountService/ExternalAccountProvider/Actions/ExternalAccountProvider.SearchGroups"
NO_CHANGES_MSG = "No changes found to be applied."
CHANGES_FOUND = "Changes found to be applied."
@@ -185,7 +216,8 @@ def get_directory(module, rest_obj):
dir_id = None
if user_dir_name is None and user_dir_id is None:
module.fail_json(msg="missing required arguments: directory_name or directory_id")
- directory_resp = rest_obj.invoke_request("GET", GET_AD_ACC)
+ URI = GET_AD_ACC if module.params.get("directory_type") == "AD" else GET_LDAP_ACC
+ directory_resp = rest_obj.invoke_request("GET", URI)
for dire in directory_resp.json_data["value"]:
if user_dir_name is not None and dire["Name"] == user_dir_name:
dir_id = dire["Id"]
@@ -201,16 +233,17 @@ def get_directory(module, rest_obj):
def search_directory(module, rest_obj, dir_id):
group_name, obj_gui_id, common_name = module.params["group_name"], None, None
- payload = {"DirectoryServerId": dir_id, "Type": "AD",
+ payload = {"DirectoryServerId": dir_id,
+ "Type": module.params["directory_type"],
"UserName": module.params["domain_username"],
"Password": module.params["domain_password"],
"CommonName": group_name}
try:
- resp = rest_obj.invoke_request("POST", SEARCH_AD, data=payload)
- for ad in resp.json_data:
- if ad["CommonName"].lower() == group_name.lower():
- obj_gui_id = ad["ObjectGuid"]
- common_name = ad["CommonName"]
+ resp = rest_obj.invoke_request("POST", SEARCH_GROUPS, data=payload)
+ for key in resp.json_data:
+ if key["CommonName"].lower() == group_name.lower():
+ obj_gui_id = key["ObjectGuid"]
+ common_name = key["CommonName"]
break
else:
module.fail_json(msg="Unable to complete the operation because the entered "
@@ -227,7 +260,7 @@ def directory_user(module, rest_obj):
user = get_directory_user(module, rest_obj)
new_role_id = get_role(module, rest_obj)
dir_id = get_directory(module, rest_obj)
- domain_resp, msg = None, ''
+ domain_resp, local_msg, msg = None, '', ''
if user is None:
obj_gui_id, common_name = search_directory(module, rest_obj, dir_id)
if module.check_mode:
@@ -238,7 +271,7 @@ def directory_user(module, rest_obj):
"IsBuiltin": False, "Enabled": True, "ObjectGuid": obj_gui_id}
]
domain_resp = rest_obj.invoke_request("POST", IMPORT_ACC_PRV, data=payload)
- msg = 'imported'
+ local_msg, msg = 'import', 'imported'
else:
if (int(user["RoleId"]) == new_role_id):
user = rest_obj.strip_substr_dict(user)
@@ -250,9 +283,9 @@ def directory_user(module, rest_obj):
if module.check_mode:
module.exit_json(msg=CHANGES_FOUND, changed=True, domain_user_status=payload)
domain_resp = rest_obj.invoke_request("PUT", update_uri, data=payload)
- msg = 'updated'
+ local_msg, msg = 'update', 'updated'
if domain_resp is None:
- module.fail_json(msg="Unable to complete the Active Directory user account.")
+ module.fail_json(msg="Unable to {0} the domain user group.".format(local_msg))
return domain_resp.json_data, msg
@@ -293,11 +326,11 @@ def get_directory_user(module, rest_obj):
def delete_directory_user(rest_obj, user_id):
delete_uri, changed = "{0}('{1}')".format(ACCOUNT_URI, user_id), False
- msg = "Invalid active directory user group name provided."
+ msg = "Invalid domain user group name provided."
resp = rest_obj.invoke_request('DELETE', delete_uri)
if resp.status_code == 204:
changed = True
- msg = "Successfully deleted the active directory user group."
+ msg = "Successfully deleted the domain user group."
return msg, changed
@@ -308,6 +341,7 @@ def main():
"group_name": {"required": True, "type": 'str'},
"role": {"required": False, "type": 'str'},
"directory_name": {"required": False, "type": 'str'},
+ "directory_type": {"type": 'str', "choices": ['AD', 'LDAP'], "default": "AD"},
"directory_id": {"required": False, "type": 'int'},
"domain_username": {"required": False, "type": 'str'},
"domain_password": {"required": False, "type": 'str', "no_log": True},
@@ -324,10 +358,10 @@ def main():
if isinstance(resp, list):
resp = resp[0]
module.exit_json(
- msg="Successfully {0} the active directory user group.".format(msg),
+ msg="Successfully {0} the domain user group.".format(msg),
domain_user_status=resp, changed=True
)
- if module.params["state"] == "absent":
+ else:
user = get_directory_user(module, rest_obj)
msg, changed = delete_directory_user(rest_obj, int(user["Id"]))
user = rest_obj.strip_substr_dict(user)
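
The directory_type option added above only changes which external account provider endpoint is read and which Type value goes into the group search payload; the import and delete paths are shared. A compact sketch of that selection using the endpoint constants from the diff (provider_uri_and_payload is an illustrative helper, not part of the module):

    GET_AD_ACC = "AccountService/ExternalAccountProvider/ADAccountProvider"
    GET_LDAP_ACC = "AccountService/ExternalAccountProvider/LDAPAccountProvider"

    def provider_uri_and_payload(directory_type, dir_id, group_name, username, password):
        # AD and LDAP share the same search action; only the provider URI and Type differ.
        uri = GET_AD_ACC if directory_type == "AD" else GET_LDAP_ACC
        payload = {"DirectoryServerId": dir_id, "Type": directory_type,
                   "UserName": username, "Password": password, "CommonName": group_name}
        return uri, payload

    print(provider_uri_and_payload("LDAP", 10, "account operators", "user@domain", "secret")[0])
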
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py
index a3bfff955..5e83a07d5 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.3.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -60,6 +60,7 @@ options:
- This option is case sensitive.
- This is applicable to I(device_service_tag), I(device_id), and I(baseline_name).
type: list
+ default: []
elements: str
devices:
description:
@@ -81,6 +82,7 @@ options:
components:
description: The target components to be updated. If not specified, all applicable device components are considered.
type: list
+ default: []
elements: str
schedule:
type: str
@@ -93,13 +95,28 @@ options:
- RebootNow
- StageForNextReboot
default: RebootNow
+ reboot_type:
+ version_added: '8.3.0'
+ type: str
+ description:
+ - This option provides the choices to reboot the server immediately after the firmware update.
+ - This is applicable when I(schedule) is C(RebootNow).
+ - C(GracefulRebootForce) performs a graceful reboot with forced shutdown.
+ - C(GracefulReboot) performs a graceful reboot without forced shutdown.
+ - C(PowerCycle) performs a power cycle for a hard reset on the device.
+ choices:
+ - GracefulReboot
+ - GracefulRebootForce
+ - PowerCycle
+ default: GracefulRebootForce
requirements:
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Jagadeesh N V (@jagadeeshnv)"
+ - "Abhishek Sinha (@ABHISHEK-SINHA10)"
notes:
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -200,7 +217,7 @@ EXAMPLES = r'''
devices:
- id: 12345
components:
- - Lifecycle Controller
+ - Lifecycle Controller
- id: 12346
components:
- Enterprise UEFI Diagnostics
@@ -237,6 +254,17 @@ EXAMPLES = r'''
- id: 12345
components:
- iDRAC with Lifecycle Controller
+
+- name: "Update firmware using baseline name and components and perform Powercycle."
+ dellemc.openmanage.ome_firmware:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ components:
+ - BIOS
+ reboot_type: PowerCycle
'''
RETURN = r'''
@@ -325,7 +353,7 @@ import json
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
@@ -361,8 +389,11 @@ def job_payload_for_update(rest_obj, module, target_data, baseline=None):
{"Key": "signVerify", "Value": "true"}]
# reboot applicable only if staging false
if schedule == "RebootNow":
- params.append({"Key": "rebootType", "Value": "3"})
- # reboot_dict = {"GracefulReboot": "2", "GracefulRebootForce": "3", "PowerCycle": "1"}
+ reboot_dict = {"PowerCycle": "1",
+ "GracefulReboot": "2",
+ "GracefulRebootForce": "3"}
+ reboot_type = module.params["reboot_type"]
+ params.append({"Key": "rebootType", "Value": reboot_dict[reboot_type]})
payload = {
"Id": 0, "JobName": "Firmware Update Task",
"JobDescription": FW_JOB_DESC, "Schedule": "startnow",
@@ -504,7 +535,7 @@ def single_dup_update(rest_obj, module):
device_id_tags = _validate_device_attributes(module)
device_ids, id_tag_map = get_device_ids(rest_obj, module, device_id_tags)
if module.check_mode:
- module.exit_json(msg=CHANGES_FOUND)
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
upload_status, token = upload_dup_file(rest_obj, module)
if upload_status:
report_payload = get_dup_applicability_payload(token, device_ids=device_ids, group_ids=group_ids,
@@ -549,7 +580,7 @@ def baseline_based_update(rest_obj, module, baseline, dev_comp_map):
if not compliance_report_list:
module.exit_json(msg=NO_CHANGES_MSG)
if module.check_mode:
- module.exit_json(msg=CHANGES_FOUND)
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
return compliance_report_list
@@ -605,6 +636,9 @@ def main():
"components": {"type": "list", "elements": 'str', "default": []},
"baseline_name": {"type": "str"},
"schedule": {"type": 'str', "choices": ['RebootNow', 'StageForNextReboot'], "default": 'RebootNow'},
+ "reboot_type": {"type": 'str',
+ "choices": ['PowerCycle', 'GracefulReboot', 'GracefulRebootForce'],
+ "default": 'GracefulRebootForce'},
"devices": {
"type": 'list', "elements": 'dict',
"options": {
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py
index d6282db3a..6c2c6a1c5 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -20,7 +20,8 @@ short_description: Create, modify, or delete a firmware baseline on OpenManage E
description: This module allows to create, modify, or delete a firmware baseline on OpenManage Enterprise or OpenManage Enterprise Modular.
version_added: "2.0.0"
author:
- - Jagadeesh N V(@jagadeeshnv)
+ - "Jagadeesh N V(@jagadeeshnv)"
+ - "Kritika Bhateja (@Kritika-Bhateja-03)"
extends_documentation_fragment:
- dellemc.openmanage.ome_auth_options
options:
@@ -61,12 +62,12 @@ options:
type: bool
description:
- Indicates whether firmware downgrade is allowed for the devices in the baseline.
- - This value will be set to C(True) by default, if not provided during baseline creation.
+ - This value will be set to C(true) by default, if not provided during baseline creation.
is_64_bit:
type: bool
description:
- Indicates if the repository contains 64-bit DUPs.
- - This value will be set to C(True) by default, if not provided during baseline creation.
+ - This value will be set to C(true) by default, if not provided during baseline creation.
device_ids:
type: list
elements: int
@@ -95,14 +96,22 @@ options:
job_wait_timeout:
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
type: int
default: 600
version_added: 3.4.0
+ filter_no_reboot_required:
+ description:
+      - Selects only the components that do not require a reboot, so that the
+        firmware/driver baseline consists of only those components of the
+        target devices that can be updated without rebooting the target devices.
+ type: bool
+ version_added: 8.1.0
+
requirements:
- "python >= 3.8.6"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
- I(device_group_names) option is not applicable for OpenManage Enterprise Modular.
- This module supports C(check_mode).
'''
@@ -122,6 +131,20 @@ EXAMPLES = r'''
- 1010
- 2020
+- name: Create baseline for device IDs with no reboot required
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+ baseline_description: "baseline_description"
+ catalog_name: "catalog_name"
+ filter_no_reboot_required: true
+ device_ids:
+ - 1010
+ - 2020
+
- name: Create baseline for servicetags
dellemc.openmanage.ome_firmware_baseline:
hostname: "192.168.0.1"
@@ -135,6 +158,20 @@ EXAMPLES = r'''
- "SVCTAG1"
- "SVCTAG2"
+- name: Create baseline for servicetags with no reboot required
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+ baseline_description: "baseline_description"
+ catalog_name: "catalog_name"
+ filter_no_reboot_required: true
+ device_service_tags:
+ - "SVCTAG1"
+ - "SVCTAG2"
+
- name: Create baseline for device groups without job tracking
dellemc.openmanage.ome_firmware_baseline:
hostname: "192.168.0.1"
@@ -147,7 +184,7 @@ EXAMPLES = r'''
device_group_names:
- "Group1"
- "Group2"
- job_wait: no
+ job_wait: false
- name: Modify an existing baseline
dellemc.openmanage.ome_firmware_baseline:
@@ -163,8 +200,18 @@ EXAMPLES = r'''
- "Group3"
- "Group4"
- "Group5"
- downgrade_enabled: no
- is_64_bit: yes
+ downgrade_enabled: false
+ is_64_bit: true
+
+- name: Modify no reboot filter in existing baseline
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "existing_baseline_name"
+ new_baseline_name: "new_baseline_name"
+ filter_no_reboot_required: true
- name: Delete a baseline
dellemc.openmanage.ome_firmware_baseline:
@@ -192,6 +239,7 @@ baseline_status:
"Description": "BASELINE DESCRIPTION",
"DeviceComplianceReports": [],
"DowngradeEnabled": true,
+ "FilterNoRebootRequired": true,
"Id": 23,
"Is64Bit": true,
"Name": "my_baseline",
@@ -267,7 +315,6 @@ GROUP_ID = 6000
import json
import time
-from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
@@ -329,7 +376,7 @@ def get_dev_ids(module, rest_obj, param, devkey):
targets = []
if values:
devlist = values
- device_resp = dict([(device[devkey], device) for device in devlist])
+ device_resp = {device[devkey]: device for device in devlist}
for st in paramlist:
if st in device_resp:
djson = device_resp[st]
@@ -353,7 +400,7 @@ def get_group_ids(module, rest_obj):
targets = []
if values:
grplist = values
- device_resp = dict([(str(grp['Name']), grp) for grp in grplist])
+ device_resp = {str(grp['Name']): grp for grp in grplist}
for st in grp_name_list:
if st in device_resp:
djson = device_resp[st]
@@ -413,6 +460,7 @@ def _get_baseline_payload(module, rest_obj):
"Targets": targets
}
baseline_payload['Description'] = module.params.get("baseline_description")
+ baseline_payload['FilterNoRebootRequired'] = module.params.get("filter_no_reboot_required")
de = module.params.get("downgrade_enabled")
baseline_payload['DowngradeEnabled'] = de if de is not None else True
sfb = module.params.get("is_64_bit")
@@ -434,26 +482,29 @@ def create_baseline(module, rest_obj):
def update_modify_payload(module, rest_obj, modify_payload, current_baseline):
- paylist = ['Name', "CatalogId", "RepositoryId", 'Description', 'DowngradeEnabled', 'Is64Bit']
+ paylist = ['Name', "CatalogId", "RepositoryId", 'Description', 'DowngradeEnabled', 'Is64Bit',
+ 'FilterNoRebootRequired']
diff_tuple = recursive_diff(modify_payload, current_baseline)
diff = 0
- payload = dict([(item, current_baseline.get(item)) for item in paylist])
- if diff_tuple:
- if diff_tuple[0]:
+ payload = {item: current_baseline.get(item) for item in paylist}
+ try:
+ if diff_tuple and diff_tuple[0]:
diff += 1
payload.update(diff_tuple[0])
- payload['Targets'] = current_baseline.get('Targets', [])
- inp_targets_list = get_target_list(module, rest_obj)
- if inp_targets_list:
- inp_target_dict = dict([(item['Id'], item['Type']['Id']) for item in inp_targets_list])
- cur_target_dict = dict([(item['Id'], item['Type']['Id']) for item in current_baseline.get('Targets', [])])
- diff_tuple = recursive_diff(inp_target_dict, cur_target_dict)
- if diff_tuple:
- diff += 1
- payload['Targets'] = inp_targets_list
- if diff == 0:
- module.exit_json(msg=NO_CHANGES_MSG)
- payload['Id'] = current_baseline['Id']
+ payload['Targets'] = current_baseline.get('Targets', [])
+ inp_targets_list = get_target_list(module, rest_obj)
+ if inp_targets_list:
+ inp_target_dict = {item['Id']: item['Type']['Id'] for item in inp_targets_list}
+ cur_target_dict = {item['Id']: item['Type']['Id'] for item in current_baseline.get('Targets', [])}
+ diff_tuple = recursive_diff(inp_target_dict, cur_target_dict)
+ if diff_tuple:
+ diff += 1
+ payload['Targets'] = inp_targets_list
+ if diff == 0:
+ module.exit_json(msg=NO_CHANGES_MSG)
+ payload['Id'] = current_baseline['Id']
+ except (IndexError, TypeError) as err:
+ module.fail_json(msg=str(err))
return payload
@@ -478,6 +529,8 @@ def modify_baseline(module, rest_obj, baseline_list):
modify_payload['DowngradeEnabled'] = module.params.get("downgrade_enabled")
if module.params.get("is_64_bit") is not None:
modify_payload['Is64Bit'] = module.params.get("is_64_bit")
+ if module.params.get("filter_no_reboot_required") is not None:
+ modify_payload['FilterNoRebootRequired'] = module.params.get("filter_no_reboot_required")
payload = update_modify_payload(module, rest_obj, modify_payload, current_baseline)
if module.check_mode:
module.exit_json(msg=CHANGES_FOUND, changed=True)
@@ -512,7 +565,8 @@ def main():
"device_service_tags": {"type": 'list', "elements": 'str'},
"device_group_names": {"type": 'list', "elements": 'str'},
"job_wait": {"type": 'bool', "default": True},
- "job_wait_timeout": {"type": 'int', "default": 600}
+ "job_wait_timeout": {"type": 'int', "default": 600},
+ "filter_no_reboot_required": {"type": 'bool'}
}
specs.update(ome_auth_params)
module = AnsibleModule(
@@ -542,7 +596,7 @@ def main():
module.fail_json(msg=str(err), error_info=json.load(err))
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
- except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
+ except (IOError, ValueError, TypeError, ConnectionError, SSLValidationError, OSError) as err:
module.fail_json(msg=str(err))
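
For ome_firmware_baseline, the refactored update_modify_payload builds the PUT payload from the current baseline and only proceeds when the requested settings, now including FilterNoRebootRequired, actually differ. The following is a simplified sketch of that idempotency check; shallow_diff is a stand-in for the collection's recursive_diff helper, and the baseline values are invented.

    # Sketch only: shallow_diff approximates recursive_diff; values are invented.
    def shallow_diff(requested, current):
        """Return the requested settings whose values differ from the current ones."""
        return {key: val for key, val in requested.items() if current.get(key) != val}

    current_baseline = {"Id": 23, "Name": "my_baseline", "DowngradeEnabled": True,
                        "Is64Bit": True, "FilterNoRebootRequired": False}
    requested = {"Name": "my_baseline", "FilterNoRebootRequired": True}

    changes = shallow_diff(requested, current_baseline)
    if not changes:
        print("No changes found to be applied.")
    else:
        payload = dict(current_baseline, **changes)   # current settings + requested changes
        print("PUT payload:", payload)
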
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py
index 9e138a002..af48fc151 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -64,7 +64,7 @@ requirements:
- "python >= 3.8.6"
author: "Sajna Shetty(@Sajna-Shetty)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -77,8 +77,8 @@ EXAMPLES = r'''
password: "password"
ca_path: "/path/to/ca_cert.pem"
device_ids:
- - 11111
- - 22222
+ - 11111
+ - 22222
- name: Retrieves device based compliance report for specified service Tags
dellemc.openmanage.ome_firmware_baseline_compliance_info:
@@ -87,8 +87,8 @@ EXAMPLES = r'''
password: "password"
ca_path: "/path/to/ca_cert.pem"
device_service_tags:
- - MXL1234
- - MXL4567
+ - MXL1234
+ - MXL4567
- name: Retrieves device based compliance report for specified group names
dellemc.openmanage.ome_firmware_baseline_compliance_info:
@@ -97,8 +97,8 @@ EXAMPLES = r'''
password: "password"
ca_path: "/path/to/ca_cert.pem"
device_group_names:
- - "group1"
- - "group2"
+ - "group1"
+ - "group2"
- name: Retrieves device compliance report for a specified baseline
dellemc.openmanage.ome_firmware_baseline_compliance_info:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py
index a98359169..261d67030 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -31,7 +31,7 @@ requirements:
- "python >= 3.8.6"
author: "Sajna Shetty(@Sajna-Shetty)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -104,7 +104,7 @@ from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
-from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError
def get_specific_baseline(module, baseline_name, resp_data):
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py
index 29b7ed905..b437db3ae 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -96,9 +96,9 @@ options:
check_certificate:
type: bool
description:
- - The certificate warnings are ignored when I(repository_type) is HTTPS. If C(True). If not, certificate warnings
+      - The certificate warnings are ignored when I(repository_type) is HTTPS and this option is C(true). If not, certificate warnings
are not ignored.
- default: False
+ default: false
job_wait:
description:
- Provides the option to wait for job completion.
@@ -109,7 +109,7 @@ options:
job_wait_timeout:
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
type: int
default: 600
version_added: 3.4.0
@@ -120,7 +120,8 @@ author:
- "Jagadeesh N V(@jagadeeshnv)"
notes:
- If I(repository_password) is provided, then the module always reports the changed status.
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
+ - This module supports IPv4 and IPv6 addresses.
- This module supports C(check_mode).
'''
@@ -138,7 +139,7 @@ EXAMPLES = r'''
source: "downloads.dell.com"
source_path: "catalog"
file_name: "catalog.gz"
- check_certificate: True
+ check_certificate: true
- name: Create a catalog from HTTP repository
dellemc.openmanage.ome_firmware_catalog:
@@ -191,7 +192,7 @@ EXAMPLES = r'''
catalog_name: "catalog_name"
catalog_description: "catalog_description"
repository_type: "DELL_ONLINE"
- check_certificate: True
+ check_certificate: true
- name: Modify a catalog using a repository from CIFS share
dellemc.openmanage.ome_firmware_catalog:
@@ -255,7 +256,7 @@ catalog_status:
"BaseLocation": null,
"BundlesCount": 0,
"Filename": "catalog.gz",
- "Id": 0,
+ "Id": 12,
"LastUpdated": null,
"ManifestIdentifier": null,
"ManifestVersion": null,
@@ -351,9 +352,11 @@ SETTLING_TIME = 3
import json
import time
+import os
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import remove_key
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
@@ -408,7 +411,7 @@ def exit_catalog(module, rest_obj, catalog_resp, operation, msg):
msg = CATALOG_UPDATED.format(operation=operation)
time.sleep(SETTLING_TIME)
catalog = get_updated_catalog_info(module, rest_obj, catalog_resp)
- module.exit_json(msg=msg, catalog_status=catalog, changed=True)
+ module.exit_json(msg=msg, catalog_status=remove_key(catalog), changed=True)
def _get_catalog_payload(params, name):
@@ -521,11 +524,21 @@ def modify_catalog(module, rest_obj, catalog_list, all_catalog):
new_catalog_current_setting = catalog_payload.copy()
repo_id = new_catalog_current_setting["Repository"]["Id"]
del new_catalog_current_setting["Repository"]["Id"]
+ fname = modify_payload.get('Filename')
+ # Special case handling for .gz catalog files
+ if fname and fname.lower().endswith('.gz'):
+ modify_payload['Filename'] = new_catalog_current_setting.get('Filename')
+ src_path = modify_payload.get('SourcePath')
+ if src_path is None:
+ src_path = new_catalog_current_setting.get('SourcePath', "")
+ if src_path.lower().endswith('.gz'):
+ src_path = os.path.dirname(src_path)
+ modify_payload['SourcePath'] = os.path.join(src_path, fname)
diff = compare_payloads(modify_payload, new_catalog_current_setting)
- if module.check_mode and diff:
- module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True)
if not diff:
module.exit_json(msg=CHECK_MODE_CHANGE_NOT_FOUND_MSG, changed=False)
+ if module.check_mode:
+ module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True)
new_catalog_current_setting["Repository"].update(modify_payload["Repository"])
catalog_payload.update(modify_payload)
catalog_payload["Repository"] = new_catalog_current_setting["Repository"]
@@ -637,7 +650,7 @@ def main():
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
- module.fail_json(msg=str(err))
+ module.exit_json(msg=str(err), failed=True)
if __name__ == '__main__':
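
The catalog module now folds a requested .gz file name into SourcePath instead of Filename when modifying a catalog. Here is a self-contained sketch mirroring the hunk above; the function name and sample paths are invented for illustration.

    # Sketch only: fold_gz_filename mirrors the modify_catalog hunk; sample paths are invented.
    import os

    def fold_gz_filename(modify_payload, current_setting):
        """If the requested Filename is a .gz, keep the current Filename and move the .gz into SourcePath."""
        fname = modify_payload.get('Filename')
        if fname and fname.lower().endswith('.gz'):
            modify_payload['Filename'] = current_setting.get('Filename')
            src_path = modify_payload.get('SourcePath')
            if src_path is None:
                src_path = current_setting.get('SourcePath', "")
            if src_path.lower().endswith('.gz'):
                src_path = os.path.dirname(src_path)
            modify_payload['SourcePath'] = os.path.join(src_path, fname)
        return modify_payload

    print(fold_gz_filename({'Filename': 'new_catalog.gz'},
                           {'Filename': 'catalog.xml', 'SourcePath': 'repo/catalog.gz'}))
    # {'Filename': 'catalog.xml', 'SourcePath': 'repo/new_catalog.gz'}
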
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py
index 411a6221a..3daf178cf 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -73,10 +73,10 @@ options:
requirements:
- "python >= 3.8.6"
notes:
- - This module manages only static device groups on Dell EMC OpenManage Enterprise.
+ - This module manages only static device groups on Dell OpenManage Enterprise.
- If a device group with the name I(parent_group_name) does not exist, a new device group with the same name is created.
- Make sure the entered parent group is not the descendant of the provided group.
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py
index 4906dcf55..9a627b234 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -32,7 +32,7 @@ options:
choices: [present, absent]
pool_name:
type: str
- required: True
+ required: true
description:
- This option is mandatory for I(state) when creating, modifying and deleting an identity pool.
new_pool_name:
@@ -129,7 +129,7 @@ author:
- "Sajna Shetty(@Sajna-Shetty)"
- "Deepak Joshi(@Dell-Deepak-Joshi))"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -145,25 +145,25 @@ EXAMPLES = r'''
pool_name: "pool1"
pool_description: "Identity pool with Ethernet, FCoE, iSCSI and FC settings"
ethernet_settings:
- starting_mac_address: "50:50:50:50:50:00"
- identity_count: 60
+ starting_mac_address: "50:50:50:50:50:00"
+ identity_count: 60
fcoe_settings:
- starting_mac_address: "70:70:70:70:70:00"
- identity_count: 75
+ starting_mac_address: "70:70:70:70:70:00"
+ identity_count: 75
iscsi_settings:
- starting_mac_address: "60:60:60:60:60:00"
- identity_count: 30
- initiator_config:
- iqn_prefix: "iqn.myprefix."
- initiator_ip_pool_settings:
- ip_range: "10.33.0.1-10.33.0.255"
- subnet_mask: "255.255.255.0"
- gateway: "192.168.4.1"
- primary_dns_server : "10.8.8.8"
- secondary_dns_server : "8.8.8.8"
+ starting_mac_address: "60:60:60:60:60:00"
+ identity_count: 30
+ initiator_config:
+ iqn_prefix: "iqn.myprefix."
+ initiator_ip_pool_settings:
+ ip_range: "10.33.0.1-10.33.0.255"
+ subnet_mask: "255.255.255.0"
+ gateway: "192.168.4.1"
+ primary_dns_server: "10.8.8.8"
+ secondary_dns_server: "8.8.8.8"
fc_settings:
- starting_address: "30:30:30:30:30:00"
- identity_count: 45
+ starting_address: "30:30:30:30:30:00"
+ identity_count: 45
- name: Create an identity pool using only ethernet settings
dellemc.openmanage.ome_identity_pool:
@@ -174,8 +174,8 @@ EXAMPLES = r'''
pool_name: "pool2"
pool_description: "create identity pool with ethernet"
ethernet_settings:
- starting_mac_address: "aa-bb-cc-dd-ee-aa"
- identity_count: 80
+ starting_mac_address: "aa-bb-cc-dd-ee-aa"
+ identity_count: 80
- name: Modify an identity pool
dellemc.openmanage.ome_identity_pool:
@@ -187,11 +187,11 @@ EXAMPLES = r'''
new_pool_name: "pool3"
pool_description: "modifying identity pool with ethernet and fcoe settings"
ethernet_settings:
- starting_mac_address: "90-90-90-90-90-90"
- identity_count: 61
+ starting_mac_address: "90-90-90-90-90-90"
+ identity_count: 61
fcoe_settings:
- starting_mac_address: "aabb.ccdd.5050"
- identity_count: 77
+ starting_mac_address: "aabb.ccdd.5050"
+ identity_count: 77
- name: Modify an identity pool using iSCSI and FC settings
dellemc.openmanage.ome_identity_pool:
@@ -265,7 +265,7 @@ import binascii
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
IDENTITY_URI = "IdentityPoolService/IdentityPools"
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py
index 26b0d545e..8a875c756 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2020-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -37,11 +37,21 @@ options:
filter:
description: Filter records by the values supported.
type: str
+ fetch_execution_history:
+ description:
+ - Fetches the execution history of the job.
+      - I(fetch_execution_history) is only applicable when a valid I(job_id) is given.
+ - When C(true), fetches all the execution history details.
+ - When C(false), fetches only the job info and last execution details.
+ type: bool
+ default: false
requirements:
- - "python >= 3.8.6"
-author: "Jagadeesh N V(@jagadeeshnv)"
+ - "python >= 3.8.6"
+author:
+ - "Jagadeesh N V (@jagadeeshnv)"
+ - "Abhishek Sinha (@Abhishek-Dell)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -73,6 +83,14 @@ EXAMPLES = r'''
skip: 1
filter: "JobType/Id eq 8"
+- name: Get detailed job execution history with the last execution detail for a job.
+ dellemc.openmanage.ome_job_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: 12345
+ fetch_execution_history: true
'''
RETURN = r'''
@@ -89,57 +107,200 @@ job_info:
sample: {
"value": [
{
- "Builtin": false,
- "CreatedBy": "system",
- "Editable": true,
+ "Id": 10429,
+ "JobName": "Discovery-201",
+ "JobDescription": "Discovery-201",
+ "NextRun": null,
+ "LastRun": "2023-06-07 09:33:07.161",
+ "StartTime": null,
"EndTime": null,
- "Id": 12345,
- "JobDescription": "Refresh Inventory for Device",
- "JobName": "Refresh Inventory for Device",
- "JobStatus": {
- "Id": 2080,
- "Name": "New"
+ "Schedule": "startnow",
+ "State": "Enabled",
+ "CreatedBy": "admin",
+ "UpdatedBy": "admin",
+ "Visible": true,
+ "Editable": true,
+ "Builtin": false,
+ "UserGenerated": true,
+ "Targets": [],
+ "Params": [],
+ "LastRunStatus": {
+ "Id": 2070,
+ "Name": "Failed"
},
"JobType": {
- "Id": 8,
- "Internal": false,
- "Name": "Inventory_Task"
+ "Id": 101,
+ "Name": "Discovery_Task",
+ "Internal": false
},
- "LastRun": "2000-01-29 10:51:34.776",
- "LastRunStatus": {
- "Id": 2060,
- "Name": "Completed"
+ "JobStatus": {
+ "Id": 2080,
+ "Name": "New"
},
- "NextRun": null,
- "Params": [],
- "Schedule": "",
- "StartTime": null,
- "State": "Enabled",
- "Targets": [
+ "ExecutionHistories": [
{
- "Data": "''",
- "Id": 123123,
- "JobId": 12345,
- "TargetType": {
- "Id": 1000,
- "Name": "DEVICE"
- }
+ "Id": 1243224,
+ "JobName": "Discovery-201",
+ "Progress": "100",
+ "StartTime": "2023-06-07 09:33:07.148",
+ "EndTime": "2023-06-07 09:33:08.403",
+ "LastUpdateTime": "2023-06-07 09:33:08.447185",
+ "ExecutedBy": "admin",
+ "JobId": 10429,
+ "JobStatus": {
+ "Id": 2070,
+ "Name": "Failed"
+ },
+ "ExecutionHistoryDetails": [
+ {
+ "Id": 1288519,
+ "Progress": "100",
+ "StartTime": "2023-06-07 09:33:07.525",
+ "EndTime": "2023-06-07 09:33:08.189",
+ "ElapsedTime": "00:00:00",
+ "Key": "198.168.0.1",
+ "Value": "Running\nDiscovery of target 198.168.0.1 started
+ .\nDiscovery target resolved to IP 198.168.0.1 .\n:
+ ========== EEMI Code: CGEN1009 ==========\nMessage:
+ Unable to perform the requested action because the device
+ management endpoint authentication over WSMAN, REDFISH failed.
+ \nRecommended actions: Make sure the credentials associated
+ with the device management endpoint are valid and retry the
+ operation.\n=======================================\nTask Failed.
+ Completed With Errors.",
+ "ExecutionHistoryId": 1243224,
+ "IdBaseEntity": 0,
+ "JobStatus": {
+ "Id": 2070,
+ "Name": "Failed"
+ }
+ },
+ {
+ "Id": 1288518,
+ "Progress": "100",
+ "StartTime": "2023-06-07 09:33:07.521",
+ "EndTime": "2023-06-07 09:33:08.313",
+ "ElapsedTime": "00:00:00",
+ "Key": "198.168.0.2",
+ "Value": "Running\nDiscovery of target 198.168.0.2 started.
+ \nDiscovery target resolved to IP 198.168.0.2 .\n:
+ ========== EEMI Code: CGEN1009 ==========\nMessage:
+ Unable to perform the requested action because the device
+ management endpoint authentication over WSMAN, REDFISH failed.
+ \nRecommended actions: Make sure the credentials associated
+ with the device management endpoint are valid and retry the
+ operation.\n=======================================\nTask Failed.
+ Completed With Errors.",
+ "ExecutionHistoryId": 1243224,
+ "IdBaseEntity": 0,
+ "JobStatus": {
+ "Id": 2070,
+ "Name": "Failed"
+ }
+ }
+ ]
+ },
+ {
+ "Id": 1243218,
+ "JobName": "Discovery-201",
+ "Progress": "100",
+ "StartTime": "2023-06-07 09:30:55.064",
+ "EndTime": "2023-06-07 09:30:56.338",
+ "LastUpdateTime": "2023-06-07 09:30:56.365294",
+ "ExecutedBy": "admin",
+ "JobId": 10429,
+ "JobStatus": {
+ "Id": 2070,
+ "Name": "Failed"
+ },
+ "ExecutionHistoryDetails": [
+ {
+ "Id": 1288512,
+ "Progress": "100",
+ "StartTime": "2023-06-07 09:30:55.441",
+ "EndTime": "2023-06-07 09:30:56.085",
+ "ElapsedTime": "00:00:00",
+ "Key": "198.168.0.1",
+ "Value": "Running\nDiscovery of target 198.168.0.1 started.
+ \nDiscovery target resolved to IP 198.168.0.1 .\n:
+ ========== EEMI Code: CGEN1009 ==========\nMessage:
+ Unable to perform the requested action because the device
+ management endpoint authentication over WSMAN, REDFISH failed.
+ \nRecommended actions: Make sure the credentials associated
+ with the device management endpoint are valid and retry the
+ operation.\n=======================================\nTask Failed.
+ Completed With Errors.",
+ "ExecutionHistoryId": 1243218,
+ "IdBaseEntity": 0,
+ "JobStatus": {
+ "Id": 2070,
+ "Name": "Failed"
+ }
+ },
+ {
+ "Id": 1288511,
+ "Progress": "100",
+ "StartTime": "2023-06-07 09:30:55.439",
+ "EndTime": "2023-06-07 09:30:56.21",
+ "ElapsedTime": "00:00:00",
+ "Key": "198.168.0.2",
+ "Value": "Running\nDiscovery of target 198.168.0.2 started.
+ \nDiscovery target resolved to IP 198.168.0.2 .\n:
+ ========== EEMI Code: CGEN1009 ==========\nMessage:
+ Unable to perform the requested action because the device
+ management endpoint authentication over WSMAN, REDFISH failed.
+ \nRecommended actions: Make sure the credentials associated
+ with the device management endpoint are valid and retry
+ the operation.\n=======================================\nTask Failed.
+ Completed With Errors.",
+ "ExecutionHistoryId": 1243218,
+ "IdBaseEntity": 0,
+ "JobStatus": {
+ "Id": 2070,
+ "Name": "Failed"
+ }
+ }
+ ]
}
],
- "UpdatedBy": null,
- "Visible": true
+ "LastExecutionDetail": {
+ "Id": 1288519,
+ "Progress": "100",
+ "StartTime": "2023-06-07 09:33:07.525",
+ "EndTime": "2023-06-07 09:33:08.189",
+ "ElapsedTime": null,
+ "Key": "198.168.0.1",
+ "Value": "Running\nDiscovery of target 198.168.0.1 started.
+ \nDiscovery target resolved to IP 198.168.0.1 .\n:
+ ========== EEMI Code: CGEN1009 ==========\nMessage:
+ Unable to perform the requested action because the device
+ management endpoint authentication over WSMAN, REDFISH failed.
+ \nRecommended actions: Make sure the credentials associated
+ with the device management endpoint are valid and retry the operation.
+ \n=======================================\nTask Failed.
+ Completed With Errors.",
+ "ExecutionHistoryId": 1243224,
+ "IdBaseEntity": 0,
+ "JobStatus": {
+ "Id": 2070,
+ "Name": "Failed"
+ }
+ }
}
- ]}
+]
+}
'''
import json
-from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict, remove_key
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
JOBS_URI = "JobService/Jobs"
+EXECUTION_HISTORIES_URI = "JobService/Jobs({0})/ExecutionHistories"
+LAST_EXECUTION_DETAIL_URI = "JobService/Jobs({0})/LastExecutionDetail"
def _get_query_parameters(module_params):
@@ -154,6 +315,45 @@ def _get_query_parameters(module_params):
return query_parameter
+def get_uri_detail(rest_obj, uri):
+ try:
+ result = []
+ resp = rest_obj.invoke_request('GET', uri)
+ json_data = resp.json_data
+ if value := json_data.get('value'):
+ for each_element in value:
+ each_element.get('JobStatus', {}).pop('@odata.type', None)
+ execution_history_detail_uri = each_element.get('ExecutionHistoryDetails@odata.navigationLink', '')[5:]
+ if execution_history_detail_uri:
+ execution_history_detail = get_uri_detail(rest_obj, execution_history_detail_uri)
+ each_element.update({"ExecutionHistoryDetails": execution_history_detail})
+ result.append(strip_substr_dict(each_element))
+ else:
+ json_data.get('JobStatus', {}).pop('@odata.type', None)
+ result = strip_substr_dict(json_data)
+ except Exception:
+ pass
+ return result
+
+
+def get_execution_history_of_a_job(rest_obj, job_id):
+    execution_histories = []
+    try:
+        execution_histories = get_uri_detail(
+            rest_obj, EXECUTION_HISTORIES_URI.format(job_id))
+    except Exception:
+        pass
+    return execution_histories
+
+
+def last_execution_detail_of_a_job(rest_obj, job_id):
+    last_execution_detail = {}
+    try:
+        last_execution_detail = get_uri_detail(
+            rest_obj, LAST_EXECUTION_DETAIL_URI.format(job_id))
+    except Exception:
+        pass
+    return last_execution_detail
+
+
def main():
specs = {
"job_id": {"required": False, "type": 'int'},
@@ -162,6 +362,7 @@ def main():
"skip": {"type": 'int', "required": False},
"filter": {"type": 'str', "required": False},
}},
+ "fetch_execution_history": {"type": 'bool', "default": False},
}
specs.update(ome_auth_params)
module = AnsibleModule(
@@ -177,7 +378,13 @@ def main():
job_id = module.params.get("job_id")
jpath = "{0}({1})".format(JOBS_URI, job_id)
resp = rest_obj.invoke_request('GET', jpath)
- job_facts = resp.json_data
+ job_facts = remove_key(resp.json_data)
+ execution_detail = []
+ if module.params.get("fetch_execution_history"):
+ execution_detail = get_execution_history_of_a_job(rest_obj, job_id)
+ last_execution = last_execution_detail_of_a_job(rest_obj, job_id)
+ job_facts.update({'ExecutionHistories': execution_detail,
+ 'LastExecutionDetail': last_execution})
resp_status.append(resp.status_code)
else:
# query applicable only for all jobs list fetching
@@ -185,20 +392,25 @@ def main():
if query_param:
resp = rest_obj.invoke_request('GET', JOBS_URI, query_param=query_param)
job_facts = resp.json_data
+ job_facts = remove_key(job_facts)
resp_status.append(resp.status_code)
else:
# Fetch all jobs, filter and pagination options
job_report = rest_obj.get_all_report_details(JOBS_URI)
- job_facts = {"@odata.context": job_report["resp_obj"].json_data["@odata.context"],
- "@odata.count": len(job_report["report_list"]),
- "value": job_report["report_list"]}
- if job_facts["@odata.count"] > 0:
+ job_facts = {"value": job_report["report_list"]}
+ job_facts = remove_key(job_facts)
+ if len(job_facts["value"]) > 0:
resp_status.append(200)
+ for each_value in job_facts["value"]:
+ job_id = each_value["Id"] if "Id" in each_value else None
+ last_execution = last_execution_detail_of_a_job(rest_obj, job_id)
+ each_value.update({'ExecutionHistories': [],
+ 'LastExecutionDetail': last_execution})
except HTTPError as httperr:
module.fail_json(msg=str(httperr), job_info=json.load(httperr))
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
- except (SSLValidationError, ConnectionError, TypeError, ValueError, SSLError, OSError) as err:
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError) as err:
module.fail_json(msg=str(err))
if 200 in resp_status:
module.exit_json(msg="Successfully fetched the job info", job_info=job_facts)
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py
index 08e307c78..044601d0d 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -24,12 +24,12 @@ extends_documentation_fragment:
- dellemc.openmanage.omem_auth_options
options:
target_port:
- required: True
+ required: true
description: "The ID of the port in the switch to breakout. Enter the port ID in the format: service tag:port.
For example, 2HB7NX2:ethernet1/1/13."
type: str
breakout_type:
- required: True
+ required: true
description:
- The preferred breakout type. For example, 4X10GE.
- To revoke the default breakout configuration, enter 'HardwareDefault'.
@@ -38,7 +38,7 @@ requirements:
- "python >= 3.8.6"
author: "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan.py
index 90ac7a837..393f44a71 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -67,7 +67,7 @@ requirements:
author:
- "Jagadeesh N V(@jagadeeshnv)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -185,7 +185,7 @@ import json
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
VLAN_CONFIG = "NetworkConfigurationService/Networks"
@@ -254,7 +254,7 @@ def create_vlan(module, rest_obj, vlans):
def delete_vlan(module, rest_obj, vlan_id):
if module.check_mode:
module.exit_json(changed=True, msg=CHECK_MODE_MSG)
- resp = rest_obj.invoke_request("DELETE", VLAN_ID_CONFIG.format(Id=vlan_id))
+ rest_obj.invoke_request("DELETE", VLAN_ID_CONFIG.format(Id=vlan_id))
module.exit_json(msg="Successfully deleted the VLAN.", changed=True)
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py
index f1de512be..ea9861a3b 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -40,7 +40,7 @@ requirements:
- "python >= 3.8.6"
author: "Deepak Joshi(@deepakjoshishri)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_powerstate.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_powerstate.py
index 7ead69f70..0122848c4 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_powerstate.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_powerstate.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -25,7 +25,7 @@ options:
power_state:
description: Desired end power state.
type: str
- required: True
+ required: true
choices: ['on', 'off', 'coldboot', 'warmboot', 'shutdown']
device_service_tag:
description:
@@ -41,7 +41,7 @@ requirements:
- "python >= 3.8.6"
author: "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py
index d2f7a87c8..eed9a45fd 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -165,7 +165,7 @@ requirements:
- "python >= 3.8.6"
author: "Jagadeesh N V (@jagadeeshnv)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
   - C(assign) operation on an already assigned profile will not redeploy.
'''
@@ -193,7 +193,7 @@ EXAMPLES = r'''
name_prefix: "omam_profile"
number_of_profiles: 1
boot_to_network_iso:
- boot_to_network: True
+ boot_to_network: true
share_type: NFS
share_ip: "192.168.0.1"
iso_path: "path/to/my_iso.iso"
@@ -210,7 +210,7 @@ EXAMPLES = r'''
name_prefix: "omam_profile"
number_of_profiles: 1
boot_to_network_iso:
- boot_to_network: True
+ boot_to_network: true
share_type: CIFS
share_ip: "192.168.0.2"
share_user: "username"
@@ -230,7 +230,7 @@ EXAMPLES = r'''
new_name: "modified profile"
description: "new description"
boot_to_network_iso:
- boot_to_network: True
+ boot_to_network: true
share_type: NFS
share_ip: "192.168.0.3"
iso_path: "path/to/my_iso.iso"
@@ -266,7 +266,7 @@ EXAMPLES = r'''
ca_path: "/path/to/ca_cert.pem"
command: "delete"
filters:
- SelectAll: True
+ SelectAll: true
Filters: =contains(ProfileName,'Profile 00002')
- name: Delete profiles using profile list filter
@@ -291,7 +291,7 @@ EXAMPLES = r'''
name: "Profile 00001"
device_id: 12456
boot_to_network_iso:
- boot_to_network: True
+ boot_to_network: true
share_type: NFS
share_ip: "192.168.0.1"
iso_path: "path/to/my_iso.iso"
@@ -305,10 +305,10 @@ EXAMPLES = r'''
ShutdownType: 0
TimeToWaitBeforeShutdown: 300
EndHostPowerState: 1
- StrictCheckingVlan: True
+ StrictCheckingVlan: true
Schedule:
- RunNow: True
- RunLater: False
+ RunNow: true
+ RunLater: false
- name: Unassign a profile using profile name
dellemc.openmanage.ome_profile:
@@ -327,7 +327,7 @@ EXAMPLES = r'''
ca_path: "/path/to/ca_cert.pem"
command: "unassign"
filters:
- SelectAll: True
+ SelectAll: true
Filters: =contains(ProfileName,'Profile 00003')
- name: Unassign profiles using profile list filter
@@ -600,7 +600,7 @@ def assign_profile(module, rest_obj):
ad_opts = mparam.get("attributes")
for opt in ad_opts_list:
if ad_opts and ad_opts.get(opt):
- diff = attributes_check(module, rest_obj, ad_opts, prof['Id'])
+ attributes_check(module, rest_obj, ad_opts, prof['Id'])
payload[opt] = ad_opts.get(opt)
if module.check_mode:
module.exit_json(msg=CHANGES_MSG, changed=True)
@@ -710,7 +710,7 @@ def modify_profile(module, rest_obj):
if diff:
if module.check_mode:
module.exit_json(msg=CHANGES_MSG, changed=True)
- resp = rest_obj.invoke_request('PUT', PROFILE_VIEW + "({0})".format(payload['Id']), data=payload)
+ rest_obj.invoke_request('PUT', PROFILE_VIEW + "({0})".format(payload['Id']), data=payload)
module.exit_json(msg="Successfully modified the profile.", changed=True)
module.exit_json(msg=NO_CHANGES_MSG)
@@ -724,7 +724,7 @@ def delete_profile(module, rest_obj):
module.fail_json(msg="Profile has to be in an unassigned state for it to be deleted.")
if module.check_mode:
module.exit_json(msg=CHANGES_MSG, changed=True)
- resp = rest_obj.invoke_request('DELETE', PROFILE_VIEW + "({0})".format(prof['Id']))
+ rest_obj.invoke_request('DELETE', PROFILE_VIEW + "({0})".format(prof['Id']))
module.exit_json(msg="Successfully deleted the profile.", changed=True)
else:
module.exit_json(msg=PROFILE_NOT_FOUND.format(name=mparam.get('name')))
@@ -732,7 +732,7 @@ def delete_profile(module, rest_obj):
payload = mparam.get('filters')
if module.check_mode:
module.exit_json(msg=CHANGES_MSG, changed=True)
- resp = rest_obj.invoke_request('POST', PROFILE_ACTION.format(action='Delete'), data=payload)
+ rest_obj.invoke_request('POST', PROFILE_ACTION.format(action='Delete'), data=payload)
module.exit_json(msg="Successfully completed the delete operation.", changed=True)
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile_info.py
new file mode 100644
index 000000000..4c5f07dd2
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile_info.py
@@ -0,0 +1,410 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 7.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_profile_info
+short_description: Retrieve profiles with attribute details
+version_added: "7.2.0"
+description:
+ - "This module retrieve profiles with attributes on OpenManage Enterprise or OpenManage Enterprise Modular."
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ profile_id:
+ description:
+ - Id of the profile.
+ - This is mutually exclusive with I(profile_name), I(system_query_options), I(template_id), and I(template_name).
+ type: int
+ profile_name:
+ description:
+ - Name of the profile.
+ - This is mutually exclusive with I(template_id), I(profile_id), I(system_query_options), and I(template_name).
+ type: str
+ template_id:
+ description:
+ - Provide the ID of the template to retrieve the list of profile(s) linked to it.
+ - This is mutually exclusive with I(profile_name), I(profile_id), I(system_query_options), and I(template_name).
+ type: int
+ template_name:
+ description:
+ - Provide the name of the template to retrieve the list of profile(s) linked to it.
+ - This is mutually exclusive with I(profile_name), I(profile_id), I(template_id), and I(system_query_options).
+ type: str
+ system_query_options:
+ description:
+ - Option for providing supported odata filters.
+ - "The profile list can be fetched and sorted based on ProfileName, TemplateName, TargetTypeId, TargetName,
+ ChassisName, ProfileState, LastRunStatus, or ProfileModified."
+ - This is mutually exclusive with I(profile_name), I(profile_id), I(template_id), and I(template_name).
+ - "C(Note) If I(profile_name), I(profile_id), I(template_id), or I(template_name) option is not provided, the
+ module retrieves all the profiles."
+ type: dict
+requirements:
+ - "python >= 3.9.6"
+author:
+ - Jagadeesh N V(@jagadeeshnv)
+notes:
+  - Run this module from a system that has direct access to Dell OpenManage Enterprise.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Retrieve all profiles
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Retrieve profile using the name
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ profile_name: eprof 00001
+
+- name: Retrieve profile using the id
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ profile_id: 10129
+
+- name: Retrieve the profiles using the template name
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_name: t2
+
+- name: Retrieve the profiles using the template id
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_id: 11
+
+- name: Retrieve the profiles based on the odata filters
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ system_query_options:
+ filter: TemplateName eq 'mytemplate'
+ orderby: ProfileState
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Status of profile information retrieval.
+ returned: always
+ type: str
+ sample: "Successfully retrieved the profile information."
+profile_info:
+ description: Information about the profile.
+ returned: success
+ type: list
+ elements: dict
+ sample:
+ [
+ {
+ "Id": 71460,
+ "ProfileName": "Profile 00001",
+ "ProfileDescription": "from source template: (Template)",
+ "TemplateId": 8,
+ "TemplateName": "Template",
+ "DataSchemaId": 8,
+ "TargetId": 0,
+ "TargetName": null,
+ "TargetTypeId": 0,
+ "DeviceIdInSlot": 0,
+ "ChassisId": 0,
+ "ChassisName": null,
+ "GroupId": 0,
+ "GroupName": null,
+ "NetworkBootToIso": null,
+ "ProfileState": 0,
+ "DeploymentTaskId": 0,
+ "LastRunStatus": 2200,
+ "ProfileModified": 0,
+ "CreatedBy": "admin",
+ "EditedBy": null,
+ "CreatedDate": "2019-09-26 13:56:41.924966",
+ "LastEditDate": "2020-12-11 08:27:20.500564",
+ "LastDeployDate": "",
+ "AttributeIdMap": {
+ "4965": {
+ "Value": "hostname",
+ "IsReadOnly": false,
+ "IsIgnored": true
+ },
+ "4963": {
+ "Value": "second floor",
+ "IsReadOnly": false,
+ "IsIgnored": true
+ },
+ "4960": {
+ "Value": "10A",
+ "IsReadOnly": false,
+ "IsIgnored": true
+ },
+ "4959": {
+ "Value": "OMAMDEV",
+ "IsReadOnly": false,
+ "IsIgnored": true
+ },
+ "4957": {
+ "Value": "Dell LAB",
+ "IsReadOnly": false,
+ "IsIgnored": true
+ },
+ "4958": {
+ "Value": null,
+ "IsReadOnly": false,
+ "IsIgnored": true
+ },
+ "4066": {
+ "Value": null,
+ "IsReadOnly": false,
+ "IsIgnored": true
+ },
+ "4231": {
+ "Value": "1",
+ "IsReadOnly": false,
+ "IsIgnored": false
+ },
+ "4229": {
+ "Value": "Disabled",
+ "IsReadOnly": false,
+ "IsIgnored": false
+ }
+ },
+ "AttributeDetails": {
+ "System": {
+ "Server Operating System": {
+ "ServerOS 1 Server Host Name": 4965
+ },
+ "Server Topology": {
+ "ServerTopology 1 Room Name": 4963,
+ "ServerTopology 1 Rack Slot": 4960,
+ "ServerTopology 1 Rack Name": 4959,
+ "ServerTopology 1 Data Center Name": 4957,
+ "ServerTopology 1 Aisle Name": 4958
+ }
+ },
+ "iDRAC": {
+ "Active Directory": {
+ "ActiveDirectory 1 Active Directory RAC Name": 4066
+ },
+ "NIC Information": {
+ "NIC 1 VLAN ID": 4231,
+ "NIC 1 Enable VLAN": 4229
+ }
+ }
+ }
+ }
+ ]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict
+
+
+PROFILE_VIEW = "ProfileService/Profiles"
+TEMPLATE_VIEW = "TemplateService/Templates"
+SUCCESS_MSG = "Successfully retrieved the profile information."
+NO_PROFILES_MSG = "Profiles with {0} {1} not found."
+SEPRTR = ','
+
+
+def get_template_details(module, rest_obj):
+ id = module.params.get('template_id')
+ query_param = {"$filter": "Id eq {0}".format(id)}
+ srch = 'Id'
+ t_id = 'template_id'
+ if not id:
+ id = module.params.get('template_name')
+ query_param = {"$filter": "Name eq '{0}'".format(id)}
+ srch = 'Name'
+ t_id = 'template_name'
+ resp = rest_obj.invoke_request('GET', TEMPLATE_VIEW, query_param=query_param)
+ if resp.success and resp.json_data.get('value'):
+ tlist = resp.json_data.get('value', [])
+ for xtype in tlist:
+ if xtype.get(srch) == id:
+ return xtype, id, t_id
+ module.exit_json(failed=True, msg="Template with {0} '{1}' not found.".format(srch.lower(), id))
+
+
+def get_profile_query(rest_obj, query, url_prm):
+ prof_list = []
+ try:
+ if query:
+ resp = rest_obj.get_all_items_with_pagination(PROFILE_VIEW, query_param=query)
+ prof_list = resp.get("value")
+ if url_prm:
+ url_resp = rest_obj.invoke_request("GET", "{0}{1}".format(PROFILE_VIEW, url_prm))
+ prof_list = [url_resp.json_data]
+ except Exception:
+ prof_list = []
+ return prof_list
+
+
+def construct_tree_str(nprfx, attr_detailed):
+ str_lst = nprfx.split(SEPRTR)
+ br = attr_detailed
+ for xs in str_lst:
+ if xs not in br:
+ br[xs] = {}
+ br = br.get(xs)
+ return br
+
+
+def recurse_subattr_list(subgroup, prefix, attr_detailed, attr_map):
+ rq_attr = ["Value", "IsReadOnly", "IsIgnored"]
+ if isinstance(subgroup, list):
+ for each_sub in subgroup:
+ nprfx = "{0}{1}{2}".format(prefix, SEPRTR, each_sub.get("DisplayName"))
+ if each_sub.get("SubAttributeGroups"):
+ recurse_subattr_list(each_sub.get("SubAttributeGroups"), nprfx, attr_detailed, attr_map)
+ else:
+ for attr in each_sub.get('Attributes'):
+ nd = construct_tree_str(nprfx, attr_detailed)
+ nd[attr['DisplayName']] = attr['AttributeId']
+ vlist = dict((xf, attr.get(xf)) for xf in rq_attr)
+ attr_map[attr['AttributeId']] = vlist
+
+
+def get_subattr_all(attr_dtls):
+ attr_detailed = {}
+ attr_map = {}
+ for each in attr_dtls:
+ recurse_subattr_list(each.get('SubAttributeGroups'), each.get('DisplayName'), attr_detailed, attr_map)
+ return attr_detailed, attr_map
+
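+# For reference, get_subattr_all returns two structures whose shapes mirror the
+# documented sample above (values here are illustrative):
+#   attr_detailed -> {"iDRAC": {"NIC Information": {"NIC 1 VLAN ID": 4231}}}
+#   attr_map      -> {4231: {"Value": "1", "IsReadOnly": False, "IsIgnored": False}}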
+
+def get_attribute_detail_tree(rest_obj, prof_id):
+ try:
+ resp = rest_obj.invoke_request('GET', "{0}({1})/AttributeDetails".format(PROFILE_VIEW, prof_id))
+ attr_list = resp.json_data.get("AttributeGroups")
+ attr_detailed, attr_map = get_subattr_all(attr_list)
+ except Exception:
+ attr_detailed, attr_map = {}, {}
+ return attr_detailed, attr_map
+
+
+def main():
+ argument_spec = {
+ "profile_id": {"type": 'int'},
+ "profile_name": {"type": 'str'},
+ "template_id": {"type": 'int'},
+ "template_name": {"type": 'str'},
+ "system_query_options": {"type": 'dict'}
+ }
+ argument_spec.update(ome_auth_params)
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=[('profile_id', 'profile_name', 'template_name', 'template_id',
+ 'system_query_options')],
+ supports_check_mode=True)
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ query = {}
+ url_prm = None
+ prof_list = []
+ if module.params.get("template_id") or module.params.get("template_name"):
+ tmplt, value, name = get_template_details(module, rest_obj)
+ query["$filter"] = "TemplateName eq '{0}'".format(tmplt.get('Name'))
+ elif module.params.get("profile_id"):
+ url_prm = "({0})".format(module.params.get("profile_id"))
+ name = "profile_id"
+ value = module.params.get("profile_id")
+ elif module.params.get("profile_name"):
+ query["$filter"] = "ProfileName eq '{0}'".format(module.params.get("profile_name"))
+ name = "profile_name"
+ value = module.params.get("profile_name")
+ elif module.params.get("system_query_options"):
+ name = "provided"
+ value = "system_query_options"
+ for k, v in module.params.get("system_query_options").items():
+ query["${0}".format(k)] = v
+ if query or url_prm:
+ prof_list = get_profile_query(rest_obj, query, url_prm)
+ if module.params.get("profile_name"):
+ xprofs = []
+ pname = module.params.get("profile_name")
+ for xp in prof_list:
+ if xp.get("ProfileName") == pname:
+ xprofs.append(xp)
+ break
+ prof_list = xprofs
+ else:
+ resp = rest_obj.get_all_items_with_pagination(PROFILE_VIEW)
+ prof_list = resp.get("value")
+ if not bool(prof_list):
+ module.exit_json(msg=SUCCESS_MSG, profile_info=prof_list)
+ for xp in prof_list:
+ attr_tree, attr_map = get_attribute_detail_tree(rest_obj, xp["Id"])
+ xp["AttributeIdMap"] = attr_map
+ xp["AttributeDetails"] = attr_tree
+ strip_substr_dict(xp)
+ if prof_list:
+ module.exit_json(msg=SUCCESS_MSG, profile_info=prof_list)
+ else:
+ module.exit_json(msg=NO_PROFILES_MSG.format(name, value), failed=True)
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (IOError, ValueError, SSLError, TypeError, ConnectionError,
+ AttributeError, IndexError, KeyError, OSError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py
index 81e3cb2ca..12286bff3 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profiles.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profiles.py
index d30e7f382..60b436a29 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profiles.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profiles.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -53,7 +53,7 @@ options:
- ID of the NIC or port number.
- C(Note) This will not be validated.
type: str
- required: True
+ required: true
team:
description:
- Group two or more ports. The ports must be connected to the same pair of Ethernet switches.
@@ -86,7 +86,7 @@ options:
- The I(names) can be retrieved using the M(dellemc.openmanage.ome_network_vlan_info)
type: list
elements: str
- required: True
+ required: true
job_wait:
description:
- Provides the option to wait for job completion.
@@ -95,7 +95,7 @@ options:
job_wait_timeout:
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
type: int
default: 120
requirements:
@@ -103,7 +103,7 @@ requirements:
author: "Jagadeesh N V (@jagadeeshnv)"
notes:
- This module supports C(check_mode).
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
'''
EXAMPLES = r'''
@@ -120,13 +120,13 @@ EXAMPLES = r'''
nic_teaming: LACP
nic_configuration:
- nic_identifier: NIC.Mezzanine.1A-1-1
- team: no
+ team: false
untagged_network: 2
tagged_networks:
names:
- vlan1
- nic_identifier: NIC.Mezzanine.1A-2-1
- team: yes
+ team: true
untagged_network: 3
tagged_networks:
names:
@@ -144,13 +144,13 @@ EXAMPLES = r'''
nic_teaming: NoTeaming
nic_configuration:
- nic_identifier: NIC.Mezzanine.1A-1-1
- team: no
+ team: false
untagged_network: 2
tagged_networks:
names:
- vlan2
- nic_identifier: NIC.Mezzanine.1A-2-1
- team: yes
+ team: true
untagged_network: 3
tagged_networks:
names:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric.py
index b4cd907eb..2e790fc08 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -79,7 +79,7 @@ requirements:
author:
- "Sajna Shetty(@Sajna-Shetty)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_info.py
new file mode 100644
index 000000000..9ce352d5e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_info.py
@@ -0,0 +1,699 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
+# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_smart_fabric_info
+short_description: Retrieves the information of smart fabrics inventoried by OpenManage Enterprise Modular
+version_added: "7.1.0"
+description:
+ - This module retrieves the list of smart fabrics in the inventory of OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.omem_auth_options
+options:
+ fabric_id:
+ description:
+ - Unique Id of the fabric.
+ - I(fabric_id) is mutually exclusive with I(fabric_name).
+ type: str
+ fabric_name:
+ description:
+ - Name of the fabric.
+ - I(fabric_name) is mutually exclusive with I(fabric_id).
+ type: str
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Kritika Bhateja(@Kritka-Bhateja)"
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = """
+---
+- name: Retrieve details of all smart fabrics
+ dellemc.openmanage.ome_smart_fabric_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Retrieve details of a specific smart fabric identified by its fabric ID
+ dellemc.openmanage.ome_smart_fabric_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_id: "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+
+- name: Retrieve details of a specific smart fabric identified by its fabric name
+ dellemc.openmanage.ome_smart_fabric_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_name: "f1"
+"""
+
+RETURN = '''
+---
+msg:
+ type: str
+ description: Status of smart fabric information retrieval.
+ returned: always
+ sample: "Successfully retrieved the smart fabric information."
+smart_fabric_info:
+ type: list
+ description: Returns the information about the smart fabric.
+ returned: success
+ sample: [
+ {
+ "Description": "Fabric f1",
+ "FabricDesign": [
+ {
+ "Actions": {
+ "#NetworkService.GetApplicableNodes": {
+ "target": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/FabricDesign/NetworkService.GetApplicableNodes"
+ },
+ "Oem": {}
+ },
+ "FabricDesignNode": [
+ {
+ "ChassisName": "Chassis-X",
+ "NodeName": "Switch-B",
+ "Slot": "Slot-A2",
+ "Type": "WeaverSwitch"
+ },
+ {
+ "ChassisName": "Chassis-X",
+ "NodeName": "Switch-A",
+ "Slot": "Slot-A1",
+ "Type": "WeaverSwitch"
+ }
+ ],
+ "Name": "2xMX9116n_Fabric_Switching_Engines_in_same_chassis",
+ "NetworkLink": [
+ {
+ "DestinationInterface": "ethernet1/1/38",
+ "DestinationNode": "Switch-B",
+ "SourceInterface": "ethernet1/1/38",
+ "SourceNode": "Switch-A"
+ },
+ {
+ "DestinationInterface": "ethernet1/1/37",
+ "DestinationNode": "Switch-B",
+ "SourceInterface": "ethernet1/1/37",
+ "SourceNode": "Switch-A"
+ },
+ {
+ "DestinationInterface": "ethernet1/1/39",
+ "DestinationNode": "Switch-B",
+ "SourceInterface": "ethernet1/1/39",
+ "SourceNode": "Switch-A"
+ },
+ {
+ "DestinationInterface": "ethernet1/1/40",
+ "DestinationNode": "Switch-B",
+ "SourceInterface": "ethernet1/1/40",
+ "SourceNode": "Switch-A"
+ }
+ ]
+ }
+ ],
+ "FabricDesignMapping": [
+ {
+ "DesignNode": "Switch-A",
+ "PhysicalNode": "NODEID1"
+ },
+ {
+ "DesignNode": "Switch-B",
+ "PhysicalNode": "NODEID2"
+ }
+ ],
+ "Health": {
+ "Issues": [
+ {
+ "Category": "Audit",
+ "DetailedDescription": "The SmartFabric is not healthy because the interface for an uplink
+ mentioned in the message is not in operational status.",
+ "Message": "The SmartFabric is not healthy because the interface JRWSV43:ethernet1/1/35 for uplink
+ 1ad54420-b145-49a1-9779-21a579ef6f2d is not in operational status.",
+ "MessageArgs": [],
+ "MessageId": "NFAB0016",
+ "Resolution": "Make sure that all the uplink interfaces are in operational status.",
+ "Severity": "Warning",
+ "TimeStamp": "2019-09-25T11:50:06Z"
+ },
+ {
+ "Category": "Audit",
+ "DetailedDescription": "The SmartFabric is not healthy because one or more VLTi links are not connected.",
+ "Message": "The SmartFabric is not healthy because all InterSwitch Links are not connected.",
+ "MessageArgs": [],
+ "MessageId": "NFAB0017",
+ "Resolution": "Make sure that the VLTi cables for all ISLs are connected and operational as per the selected fabric design.",
+ "Severity": "Warning",
+ "TimeStamp": "2019-09-25T11:50:06Z"
+ },
+ {
+ "Category": "Audit",
+ "DetailedDescription": "The SmartFabric is not healthy because the interface for an uplink
+ mentioned in the message is not in operational status.",
+ "Message": "The SmartFabric is not healthy because the interface 6H7J6Z2:ethernet1/1/35 for uplink
+ 1ad54420-b145-49a1-9779-21a579ef6f2d is not in operational status.",
+ "MessageArgs": [],
+ "MessageId": "NFAB0016",
+ "Resolution": "Make sure that all the uplink interfaces are in operational status.",
+ "Severity": "Warning",
+ "TimeStamp": "2019-09-25T11:50:06Z"
+ },
+ {
+ "Category": "Audit",
+ "DetailedDescription": "The SmartFabric is not healthy because one or more of the uplink interfaces are not bonded.",
+ "Message": "The SmartFabric is not healthy because the uplink 1ad54420-b145-49a1-9779-21a579ef6f2d
+ interface 6H7J6Z2:ethernet1/1/35 is not bonded to the other interfaces in the uplink.",
+ "MessageArgs": [],
+ "MessageId": "NFAB0019",
+ "Resolution": "Make sure that the Link Aggregation Control Protocol (LACP) is enabled on all ports on the remote
+ switch to which the uplink ports from the fabric are connected.",
+ "Severity": "Warning",
+ "TimeStamp": "2019-09-25T11:50:06Z"
+ },
+ {
+ "Category": "Audit",
+ "DetailedDescription": "The SmartFabric is not healthy because one or more of the uplink interfaces are not bonded.",
+ "Message": "The SmartFabric is not healthy because the uplink 1ad54420-b145-49a1-9779-21a579ef6f2d
+ interface JRWSV43:ethernet1/1/35 is not bonded to the other interfaces in the uplink.",
+ "MessageArgs": [],
+ "MessageId": "NFAB0019",
+ "Resolution": "Make sure that the Link Aggregation Control Protocol (LACP) is enabled on all ports
+ on the remote switch to which the uplink ports from the fabric are connected.",
+ "Severity": "Warning",
+ "TimeStamp": "2019-09-25T11:50:06Z"
+ }
+ ],
+ "Status": "4000"
+ },
+ "Id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2",
+ "LifeCycleStatus": [
+ {
+ "Activity": "Create",
+ "Status": "2060"
+ }
+ ],
+ "Multicast": [
+ {
+ "FloodRestrict": true,
+ "IgmpVersion": "3",
+ "MldVersion": "2"
+ }
+ ],
+ "Name": "f1",
+ "OverrideLLDPConfiguration": "Disabled",
+ "ScaleVLANProfile": "Enabled",
+ "Servers": [
+ {
+ "ChassisServiceTag": "6H5S6Z2",
+ "ConnectionState": true,
+ "ConnectionStateReason": 101,
+ "DeviceCapabilities": [
+ 1,
+ 2,
+ 3,
+ 4,
+ 7,
+ 8,
+ 9,
+ 41,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 208,
+ 16,
+ 17,
+ 18,
+ 212,
+ 30,
+ 31
+ ],
+ "DeviceManagement": [
+ {
+ "DnsName": "iDRAC-6GZK6Z2",
+ "InstrumentationName": "",
+ "MacAddress": "4c:d9:8f:7a:7c:43",
+ "ManagementId": 135185,
+ "ManagementProfile": [
+ {
+ "AgentName": "iDRAC",
+ "HasCreds": 0,
+ "ManagementId": 135185,
+ "ManagementProfileId": 135185,
+ "ManagementURL": "https://[2607:f2b1:f081:9:4ed9:8fff:fe7a:7c43]:443/",
+ "ProfileId": "WSMAN_OOB",
+ "Status": 1000,
+ "StatusDateTime": "2019-10-29 09:30:38.552",
+ "Version": "3.20.21.20"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": "100.96.24.28"
+ },
+ {
+ "DnsName": "iDRAC-6GZK6Z2",
+ "InstrumentationName": "",
+ "MacAddress": "4c:d9:8f:7a:7c:43",
+ "ManagementId": 135186,
+ "ManagementProfile": [
+ {
+ "AgentName": "iDRAC",
+ "HasCreds": 0,
+ "ManagementId": 135186,
+ "ManagementProfileId": 135186,
+ "ManagementURL": "https://[2607:f2b1:f081:9:4ed9:8fff:fe7a:7c43]:443/",
+ "ProfileId": "WSMAN_OOB",
+ "Status": 1000,
+ "StatusDateTime": "2019-10-29 09:30:38.552",
+ "Version": "3.20.21.20"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": "[2607:f2b1:f081:9:4ed9:8fff:fe7a:7c43]"
+ }
+ ],
+ "DeviceName": "MX-6H5S6Z2:Sled-1",
+ "DeviceServiceTag": "6GZK6Z2",
+ "Enabled": true,
+ "Id": 10071,
+ "Identifier": "6GZK6Z2",
+ "LastInventoryTime": "2019-10-29 09:30:38.552",
+ "LastStatusTime": "2019-10-29 09:41:51.051",
+ "ManagedState": 3000,
+ "Model": "PowerEdge MX840c",
+ "PowerState": 17,
+ "SlotConfiguration": {
+ "ChassisId": "10072",
+ "ChassisName": "MX-6H5S6Z2",
+ "ChassisServiceTag": "6H5S6Z2",
+ "DeviceType": "1000",
+ "SledBlockPowerOn": "None blocking",
+ "SlotId": "10084",
+ "SlotName": "Sled-1",
+ "SlotNumber": "1",
+ "SlotType": "2000"
+ },
+ "Status": 1000,
+ "SystemId": 1894,
+ "Type": 1000
+ }
+ ],
+ "Summary": {
+ "NodeCount": 2,
+ "ServerCount": 1,
+ "UplinkCount": 1
+ },
+ "Switches": [
+ {
+ "ChassisServiceTag": "6H5S6Z2",
+ "ConnectionState": true,
+ "ConnectionStateReason": 101,
+ "DeviceCapabilities": [
+ 1,
+ 2,
+ 3,
+ 5,
+ 7,
+ 8,
+ 9,
+ 207,
+ 18,
+ 602,
+ 603,
+ 604,
+ 605,
+ 606,
+ 607,
+ 608,
+ 609,
+ 610,
+ 611,
+ 612,
+ 613,
+ 614,
+ 615,
+ 616,
+ 617,
+ 618,
+ 619,
+ 620,
+ 621,
+ 622
+ ],
+ "DeviceManagement": [
+ {
+ "DnsName": "",
+ "InstrumentationName": "MX9116n Fabric Engine",
+ "MacAddress": "20:04:0F:4F:4E:04",
+ "ManagementId": 135181,
+ "ManagementProfile": [
+ {
+ "HasCreds": 0,
+ "ManagementId": 135181,
+ "ManagementProfileId": 135181,
+ "ManagementURL": "",
+ "ProfileId": "",
+ "Status": 1000,
+ "StatusDateTime": "2019-10-29 09:30:36.273"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": "100.96.24.36"
+ },
+ {
+ "DnsName": "",
+ "InstrumentationName": "MX9116n Fabric Engine",
+ "MacAddress": "20:04:0F:4F:4E:04",
+ "ManagementId": 135182,
+ "ManagementProfile": [
+ {
+ "HasCreds": 0,
+ "ManagementId": 135182,
+ "ManagementProfileId": 135182,
+ "ManagementURL": "",
+ "ProfileId": "",
+ "Status": 1000,
+ "StatusDateTime": "2019-10-29 09:30:36.273"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": ""
+ }
+ ],
+ "DeviceName": "MX-6H5S6Z2:IOM-A2",
+ "DeviceServiceTag": "6H7J6Z2",
+ "Enabled": true,
+ "Id": 10074,
+ "Identifier": "6H7J6Z2",
+ "LastInventoryTime": "2019-10-29 09:30:36.332",
+ "LastStatusTime": "2019-10-29 09:31:00.931",
+ "ManagedState": 3000,
+ "Model": "MX9116n Fabric Engine",
+ "PowerState": 17,
+ "SlotConfiguration": {
+ "ChassisId": "10072",
+ "ChassisName": "MX-6H5S6Z2",
+ "ChassisServiceTag": "6H5S6Z2",
+ "DeviceType": "4000",
+ "SledBlockPowerOn": "null",
+ "SlotId": "10079",
+ "SlotName": "IOM-A2",
+ "SlotNumber": "2",
+ "SlotType": "4000"
+ },
+ "Status": 1000,
+ "SystemId": 2031,
+ "Type": 4000
+ },
+ {
+ "ChassisServiceTag": "6H5S6Z2",
+ "ConnectionState": true,
+ "ConnectionStateReason": 101,
+ "DeviceCapabilities": [
+ 1,
+ 2,
+ 3,
+ 5,
+ 7,
+ 8,
+ 9,
+ 207,
+ 18,
+ 602,
+ 603,
+ 604,
+ 605,
+ 606,
+ 607,
+ 608,
+ 609,
+ 610,
+ 611,
+ 612,
+ 613,
+ 614,
+ 615,
+ 616,
+ 617,
+ 618,
+ 619,
+ 620,
+ 621,
+ 622
+ ],
+ "DeviceManagement": [
+ {
+ "DnsName": "",
+ "InstrumentationName": "MX9116n Fabric Engine",
+ "MacAddress": "E8:B5:D0:52:61:46",
+ "ManagementId": 135183,
+ "ManagementProfile": [
+ {
+ "HasCreds": 0,
+ "ManagementId": 135183,
+ "ManagementProfileId": 135183,
+ "ManagementURL": "",
+ "ProfileId": "",
+ "Status": 1000,
+ "StatusDateTime": "2019-10-29 09:30:37.115"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": "100.96.24.37"
+ },
+ {
+ "DnsName": "",
+ "InstrumentationName": "MX9116n Fabric Engine",
+ "MacAddress": "E8:B5:D0:52:61:46",
+ "ManagementId": 135184,
+ "ManagementProfile": [
+ {
+ "HasCreds": 0,
+ "ManagementId": 135184,
+ "ManagementProfileId": 135184,
+ "ManagementURL": "",
+ "ProfileId": "",
+ "Status": 1000,
+ "StatusDateTime": "2019-10-29 09:30:37.115"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": ""
+ }
+ ],
+ "DeviceName": "MX-6H5S6Z2:IOM-A1",
+ "DeviceServiceTag": "JRWSV43",
+ "Enabled": true,
+ "Id": 20881,
+ "Identifier": "JRWSV43",
+ "LastInventoryTime": "2019-10-29 09:30:37.172",
+ "LastStatusTime": "2019-10-29 09:31:00.244",
+ "ManagedState": 3000,
+ "Model": "MX9116n Fabric Engine",
+ "PowerState": 17,
+ "SlotConfiguration": {
+ "ChassisId": "10072",
+ "ChassisName": "MX-6H5S6Z2",
+ "ChassisServiceTag": "6H5S6Z2",
+ "DeviceType": "4000",
+ "SledBlockPowerOn": "null",
+ "SlotId": "10078",
+ "SlotName": "IOM-A1",
+ "SlotNumber": "1",
+ "SlotType": "4000"
+ },
+ "Status": 1000,
+ "SystemId": 2031,
+ "Type": 4000
+ }
+ ],
+ "Uplinks": [
+ {
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "MediaType": "Ethernet",
+ "Name": "u1",
+ "NativeVLAN": 1,
+ "Summary": {
+ "NetworkCount": 1,
+ "PortCount": 2
+ },
+ "UfdEnable": "Disabled"
+ }
+ ]
+ }
+ ]
+
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1006",
+ "RelatedProperties": [],
+ "Message": "Unable to complete the request because the resource URI does not exist or is not implemented.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide
+ for more information about resource URI and its properties."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict
+
+
+FABRIC_URI = "NetworkService/Fabrics"
+# messages
+SUCCESS_MSG = "Successfully retrieved the smart fabric information."
+UNSUCCESS_MSG = "Unable to retrieve smart fabric information."
+INVALID_FABRIC_ID = "Unable to retrieve smart fabric information with fabric ID {0}."
+INVALID_FABRIC_NAME = "Unable to retrieve smart fabric information with fabric name {0}."
+
+
+def get_smart_fabric_details_via_id(module, rest_obj, fabric_id):
+ resp = []
+ try:
+ fabric_path = "{0}('{1}')".format(FABRIC_URI, fabric_id)
+ resp_det = rest_obj.invoke_request("GET", fabric_path)
+ resp = [resp_det.json_data]
+ except HTTPError:
+ module.exit_json(msg=INVALID_FABRIC_ID.format(fabric_id), failed=True)
+ return resp
+
+
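+# Expands the navigation properties of one fabric record in place. "Multicast"
+# and "FabricDesign" are fetched through their "@odata.id" links, while the
+# other keys use their "...@odata.navigationLink" counterparts; in both cases
+# the leading "/api" characters are stripped so the path is relative to the
+# REST root. The per-item loop below relies on clean_data() (defined later in
+# this file) modifying dictionaries in place, and strip_substr_dict() is
+# assumed to behave the same way.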
+def fetch_smart_fabric_link_details(module, rest_obj, fabric_details_dict):
+ info_dict = {"Switches": "Switches@odata.navigationLink", "Servers": "Servers@odata.navigationLink",
+ "ISLLinks": "ISLLinks@odata.navigationLink", "Uplinks": "Uplinks@odata.navigationLink",
+ "Multicast": None, "FabricDesign": None}
+ info_list = ["Multicast", "FabricDesign"]
+ try:
+ for key in info_dict:
+ link = info_dict[key]
+ if key in info_list:
+ fabric_info_dict = fabric_details_dict[key]["@odata.id"]
+ uri = fabric_info_dict.strip("/api")
+ response = rest_obj.invoke_request('GET', uri)
+ if response.json_data:
+ details = [response.json_data]
+ else:
+ fabric_info_dict = fabric_details_dict.get(link)
+ uri = fabric_info_dict.strip("/api")
+ response = rest_obj.invoke_request('GET', uri)
+ if response.json_data:
+ details = response.json_data.get("value")
+ for item in details:
+ item = strip_substr_dict(item)
+ item = clean_data(item)
+ fabric_details_dict[key] = details
+ except HTTPError:
+ module.exit_json(msg=UNSUCCESS_MSG, failed=True)
+ return fabric_details_dict
+
+
+def strip_smart_fabric_info(module, rest_obj, smart_fabric_info):
+ for i in range(len(smart_fabric_info)):
+ fabrics_details = smart_fabric_info[i]
+ fabrics_details = fetch_smart_fabric_link_details(module, rest_obj, fabrics_details)
+ fabrics_details = strip_substr_dict(fabrics_details)
+ fabrics_details = clean_data(fabrics_details)
+ smart_fabric_info[i] = fabrics_details
+ return smart_fabric_info
+
+
+def clean_data(data):
+ """
+ data: A dictionary.
+ return: A data dictionary after removing items that are not required for end user.
+ """
+ for k in data.copy():
+ if isinstance(data[k], dict):
+ if data[k].get("@odata.id"):
+ del data[k]["@odata.id"]
+ if not data[k]:
+ del data[k]
+ return data
+
+
+def main():
+
+ specs = {
+ "fabric_id": {"type": 'str', "required": False},
+ "fabric_name": {"type": 'str', "required": False}
+ }
+ specs.update(ome_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ mutually_exclusive=[
+ ('fabric_id', 'fabric_name')
+ ],
+ supports_check_mode=True
+ )
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ if module.params.get("fabric_id") is not None:
+ fabric_id = module.params.get("fabric_id")
+ smart_fabric_info = get_smart_fabric_details_via_id(module, rest_obj, fabric_id)
+ smart_fabric_info = strip_smart_fabric_info(module, rest_obj, smart_fabric_info)
+ module.exit_json(msg=SUCCESS_MSG, smart_fabric_info=smart_fabric_info)
+ else:
+ resp = rest_obj.invoke_request('GET', FABRIC_URI)
+ if resp.json_data:
+ smart_fabric_info = resp.json_data.get("value")
+ if module.params.get("fabric_name") is not None:
+ fabric_name_found = False
+ for fabric in smart_fabric_info:
+ fabric_name = module.params.get("fabric_name")
+ if fabric['Name'] == fabric_name:
+ smart_fabric_info = [fabric]
+ fabric_name_found = True
+ if not fabric_name_found:
+ module.exit_json(msg=INVALID_FABRIC_NAME.format(fabric_name), failed=True)
+ smart_fabric_info = strip_smart_fabric_info(module, rest_obj, smart_fabric_info)
+ module.exit_json(msg=SUCCESS_MSG, smart_fabric_info=smart_fabric_info)
+ else:
+ module.exit_json(msg=UNSUCCESS_MSG, failed=True)
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError, SSLError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py
index cae5d8d69..0ac1f2557 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -93,7 +93,7 @@ requirements:
author:
- "Jagadeesh N V(@jagadeeshnv)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink_info.py
new file mode 100644
index 000000000..d6bb0683e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink_info.py
@@ -0,0 +1,346 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 7.2.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_smart_fabric_uplink_info
+short_description: Retrieves details of the fabric uplinks on OpenManage Enterprise Modular
+version_added: "7.1.0"
+description: This module retrieves the details of the fabric uplinks on OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.omem_auth_options
+options:
+ fabric_id:
+ type: str
+ description:
+ - Unique id of the fabric.
+ - I(fabric_id) is mutually exclusive with I(fabric_name).
+ fabric_name:
+ type: str
+ description:
+ - Unique name of the fabric.
+ - I(fabric_name) is mutually exclusive with I(fabric_id).
+ uplink_id:
+ type: str
+ description:
+ - Unique id of the uplink.
+ - I(uplink_id) is mutually exclusive with I(uplink_name).
+ - I(fabric_id) or I(fabric_name) is required along with I(uplink_id).
+ uplink_name:
+ type: str
+ description:
+ - Unique name of the uplink.
+ - I(uplink_name) is mutually exclusive with I(uplink_id).
+ - I(fabric_id) or I(fabric_name) is required along with I(uplink_name).
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Husniya Hameed(@husniya_hameed)"
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Retrieve all fabric uplink information using fabric_id.
+ dellemc.openmanage.ome_smart_fabric_uplink_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_id: "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+
+- name: Retrieve all fabric uplink information using fabric_name.
+ dellemc.openmanage.ome_smart_fabric_uplink_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_name: "f1"
+
+- name: Retrieve specific fabric information using uplink_id.
+ dellemc.openmanage.ome_smart_fabric_uplink_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_id: "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+ uplink_id: "1ad54420-b145-49a1-9779-21a579ef6f2d"
+
+- name: Retrieve specific fabric information using uplink_name.
+ dellemc.openmanage.ome_smart_fabric_uplink_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_id: "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+ uplink_name: "u1"
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Status of fabric uplink information retrieval.
+ returned: always
+ sample: "Successfully retrieved the fabric uplink information."
+uplink_info:
+ type: list
+ description: Information about the fabric uplink.
+ returned: on success
+ sample: [{
+ "Description": "",
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "MediaType": "Ethernet",
+ "Name": "u1",
+ "NativeVLAN": 1,
+ "Networks": [{
+ "CreatedBy": "system",
+ "CreationTime": "2018-09-25 14:46:12.374",
+ "Description": null,
+ "Id": 10155,
+ "InternalRefNWUUId": "f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d",
+ "Name": "testvlan",
+ "Type": 1,
+ "UpdatedBy": "root",
+ "UpdatedTime": "2019-06-27 15:06:22.836",
+ "VlanMaximum": 143,
+ "VlanMinimum": 143
+ }],
+ "Ports": [{
+ "AdminStatus": "Enabled",
+ "BlinkStatus": "OFF",
+ "ConfiguredSpeed": "0",
+ "CurrentSpeed": "0",
+ "Description": "",
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "MaxSpeed": "0",
+ "MediaType": "Ethernet",
+ "Name": "",
+ "NodeServiceTag": "SVCTAG1",
+ "OpticsType": "NotPresent",
+ "PortNumber": "ethernet1/1/35",
+ "Role": "Uplink",
+ "Status": "Down",
+ "Type": "PhysicalEthernet"
+ },
+ {
+ "AdminStatus": "Enabled",
+ "BlinkStatus": "OFF",
+ "ConfiguredSpeed": "0",
+ "CurrentSpeed": "0",
+ "Description": "",
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "MaxSpeed": "0",
+ "MediaType": "Ethernet",
+ "Name": "",
+ "NodeServiceTag": "SVCTAG1",
+ "OpticsType": "NotPresent",
+ "PortNumber": "ethernet1/1/35",
+ "Role": "Uplink",
+ "Status": "Down",
+ "Type": "PhysicalEthernet"
+ }],
+ "Summary": {
+ "NetworkCount": 1,
+ "PortCount": 2
+ },
+ "UfdEnable": "Disabled"
+ }]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1006",
+ "RelatedProperties": [],
+ "Message": "Unable to complete the request because the resource URI does not exist or is not implemented.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide
+ for more information about resource URI and its properties."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict
+
+ALL_UPLINKS_URI = "NetworkService/Fabrics('{0}')/Uplinks?$expand=Networks,Ports"
+FABRIC_URI = "NetworkService/Fabrics"
+UPLINK_URI = "NetworkService/Fabrics('{0}')/Uplinks('{1}')?$expand=Networks,Ports"
+# Messages
+SUCCESS_MSG = "Successfully retrieved the fabric uplink information."
+UNSUCCESS_MSG = "Unable to retrieve smart fabric uplink information."
+INVALID_FABRIC_ID = "Unable to retrieve smart fabric uplink information with fabric ID {0}."
+INVALID_FABRIC_NAME = "Unable to retrieve smart fabric uplink information with fabric name {0}."
+INVALID_UPLINK_ID = "Unable to retrieve smart fabric uplink information with uplink ID {0}."
+INVALID_UPLINK_NAME = "Unable to retrieve smart fabric uplink information with uplink name {0}."
+ID_UNAVAILABLE = "fabric_id or fabric_name is required along with uplink_id."
+NAME_UNAVAILABLE = "fabric_id or fabric_name is required along with uplink_name."
+
+
+def get_all_uplink_details(module, rest_obj):
+ resp = []
+ try:
+ fabric_det = rest_obj.invoke_request("GET", FABRIC_URI)
+ fabric_resp = fabric_det.json_data.get("value")
+ for each in fabric_resp:
+ if each.get("Uplinks@odata.navigationLink"):
+ uplink_det = each.get("Uplinks@odata.navigationLink")
+ uplink = uplink_det[5:] + "?$expand=Networks,Ports"  # drop the leading "/api/" from the navigation link and expand Networks and Ports
+ uplink_details = rest_obj.invoke_request("GET", uplink)
+ for val in uplink_details.json_data.get("value"):
+ resp.append(val)
+ except HTTPError:
+ module.exit_json(msg=UNSUCCESS_MSG, failed=True)
+ return resp
+
+
+def get_uplink_details_from_fabric_id(module, rest_obj, fabric_id):
+ resp = []
+ try:
+ resp_det = rest_obj.invoke_request("GET", ALL_UPLINKS_URI.format(fabric_id))
+ resp = resp_det.json_data.get("value")
+ except HTTPError:
+ module.exit_json(msg=INVALID_FABRIC_ID.format(fabric_id), failed=True)
+ return resp
+
+
+def get_fabric_id_from_name(module, rest_obj, fabric_name):
+ fabric_id = ""
+ try:
+ resp_det = rest_obj.invoke_request("GET", FABRIC_URI)
+ resp = resp_det.json_data.get("value")
+ for each in resp:
+ if each["Name"] == fabric_name:
+ fabric_id = each["Id"]
+ break
+ except HTTPError:
+ module.exit_json(msg=UNSUCCESS_MSG, failed=True)
+ if not fabric_id:
+ module.exit_json(msg=INVALID_FABRIC_NAME.format(fabric_name), failed=True)
+ return fabric_id
+
+
+def get_uplink_details(module, rest_obj, fabric_id, uplink_id):
+ resp = []
+ try:
+ resp_det = rest_obj.invoke_request("GET", UPLINK_URI.format(fabric_id, uplink_id))
+ resp = [resp_det.json_data]
+ except HTTPError:
+ module.exit_json(msg=INVALID_UPLINK_ID.format(uplink_id), failed=True)
+ return resp
+
+
+def get_uplink_id_from_name(module, rest_obj, uplink_name, fabric_id):
+ uplink_id = ""
+ try:
+ resp_det = rest_obj.invoke_request("GET", ALL_UPLINKS_URI.format(fabric_id))
+ resp = resp_det.json_data.get("value")
+ for each in resp:
+ if each["Name"] == uplink_name:
+ uplink_id = each["Id"]
+ break
+ except HTTPError:
+ module.exit_json(msg=UNSUCCESS_MSG, failed=True)
+ if not uplink_id:
+ module.exit_json(msg=INVALID_UPLINK_NAME.format(uplink_name), failed=True)
+ return uplink_id
+
+
+def strip_uplink_info(uplink_info):
+ for item in uplink_info:
+ item = strip_substr_dict(item)
+ if item["Networks"]:
+ for net in item["Networks"]:
+ net = strip_substr_dict(net)
+ if item["Ports"]:
+ for port in item["Ports"]:
+ port = strip_substr_dict(port)
+ return uplink_info
+
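+# Parameter combinations handled below:
+#   * fabric_id or fabric_name only        -> all uplinks of that fabric
+#   * fabric_* plus uplink_id/uplink_name  -> the single matching uplink
+#   * no parameters                        -> uplinks of every fabric
+# uplink_id or uplink_name without a fabric identifier is rejected with
+# ID_UNAVAILABLE or NAME_UNAVAILABLE respectively.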
+
+def main():
+ specs = {
+ "fabric_id": {"type": "str"},
+ "fabric_name": {"type": "str"},
+ "uplink_id": {"type": "str"},
+ "uplink_name": {"type": "str"}
+ }
+ specs.update(ome_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ mutually_exclusive=[('fabric_id', 'fabric_name'), ('uplink_id', 'uplink_name')],
+ supports_check_mode=True
+ )
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ uplink_info = []
+ fabric_id = module.params["fabric_id"]
+ fabric_name = module.params["fabric_name"]
+ uplink_id = module.params["uplink_id"]
+ uplink_name = module.params["uplink_name"]
+
+ if fabric_id:
+ uplink_info = get_uplink_details_from_fabric_id(module, rest_obj, fabric_id)
+ elif fabric_name:
+ fabric_id = get_fabric_id_from_name(module, rest_obj, fabric_name)
+ if fabric_id:
+ uplink_info = get_uplink_details_from_fabric_id(module, rest_obj, fabric_id)
+
+ if uplink_id and not (fabric_id or fabric_name):
+ module.exit_json(msg=ID_UNAVAILABLE, failed=True)
+ elif uplink_id:
+ uplink_info = get_uplink_details(module, rest_obj, fabric_id, uplink_id)
+ elif uplink_name and not (fabric_id or fabric_name):
+ module.exit_json(msg=NAME_UNAVAILABLE, failed=True)
+ elif uplink_name:
+ uplink_id = get_uplink_id_from_name(module, rest_obj, uplink_name, fabric_id)
+ if uplink_id:
+ uplink_info = get_uplink_details(module, rest_obj, fabric_id, uplink_id)
+
+ if fabric_id is None and fabric_name is None and uplink_id is None and uplink_name is None:
+ uplink_info = get_all_uplink_details(module, rest_obj)
+ if not bool(uplink_info):
+ module.exit_json(msg=SUCCESS_MSG, uplink_info=uplink_info)
+
+ uplink_info_strip = strip_uplink_info(uplink_info)
+ module.exit_json(msg=SUCCESS_MSG, uplink_info=uplink_info_strip)
+
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (IOError, ValueError, TypeError, ConnectionError, SSLValidationError, SSLError, OSError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py
index 8c5fa98b3..6bf77ad02 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.5.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -39,13 +39,13 @@ options:
template_id:
description:
- ID of the existing template.
- - This option is applicable when I(command) is C(modify), C(deploy), C(delete) and C(export).
+ - This option is applicable when I(command) is C(modify), C(deploy), C(delete), C(clone) and C(export).
- This option is mutually exclusive with I(template_name).
type: int
template_name:
description:
- Name of the existing template.
- - This option is applicable when I(command) is C(modify), C(deploy), C(delete) and C(export).
+ - This option is applicable when I(command) is C(modify), C(deploy), C(delete), C(clone) and C(export).
- This option is mutually exclusive with I(template_id).
type: str
device_id:
@@ -120,11 +120,26 @@ options:
and servers. This is applicable when I(command) is C(create).
- >-
Refer OpenManage Enterprise API Reference Guide for more details.
+ job_wait:
+ type: bool
+ description:
+ - Provides the option to wait for job completion.
+ - This option is applicable when I(command) is C(create) or C(deploy).
+ default: true
+ job_wait_timeout:
+ type: int
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(true).
+ default: 1200
requirements:
- "python >= 3.8.6"
-author: "Jagadeesh N V (@jagadeeshnv)"
+author:
+ - "Jagadeesh N V (@jagadeeshnv)"
+ - "Husniya Hameed (@husniya_hameed)"
+ - "Kritika Bhateja (@Kritika-Bhateja)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -184,7 +199,7 @@ EXAMPLES = r'''
- name: Deploy template on multiple devices
dellemc.openmanage.ome_template:
- hostname: "192.168.0.1"
+ hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
@@ -199,7 +214,7 @@ EXAMPLES = r'''
- name: Deploy template on groups
dellemc.openmanage.ome_template:
- hostname: "192.168.0.1"
+ hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
@@ -211,7 +226,7 @@ EXAMPLES = r'''
- name: Deploy template on multiple devices along with the attributes values to be modified on the target devices
dellemc.openmanage.ome_template:
- hostname: "192.168.0.1"
+ hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
@@ -232,18 +247,18 @@ EXAMPLES = r'''
# Service tags not allowed.
- DeviceId: 12765
Attributes:
- - Id : 15645
- Value : "0.0.0.0"
- IsIgnored : false
+ - Id: 15645
+ Value: "0.0.0.0"
+ IsIgnored: false
- DeviceId: 10173
Attributes:
- - Id : 18968,
- Value : "hostname-1"
- IsIgnored : false
+ - Id: 18968
+ Value: "hostname-1"
+ IsIgnored: false
- name: Deploy template and Operating System (OS) on multiple devices
dellemc.openmanage.ome_template:
- hostname: "192.168.0.1"
+ hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
@@ -277,7 +292,7 @@ EXAMPLES = r'''
- name: "Deploy template on multiple devices and changes the device-level attributes. After the template is deployed,
install OS using its image"
dellemc.openmanage.ome_template:
- hostname: "192.168.0.1"
+ hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
@@ -293,14 +308,14 @@ install OS using its image"
Attributes:
- DeviceId: 12765
Attributes:
- - Id : 15645
- Value : "0.0.0.0"
- IsIgnored : false
+ - Id: 15645
+ Value: "0.0.0.0"
+ IsIgnored: false
- DeviceId: 10173
Attributes:
- - Id : 18968,
- Value : "hostname-1"
- IsIgnored : false
+ - Id: 18968
+ Value: "hostname-1"
+ IsIgnored: false
NetworkBootIsoModel:
BootToNetwork: true
ShareType: "NFS"
@@ -456,6 +471,19 @@ install OS using its image"
Name: "Configuration Compliance"
Content: "{{ lookup('ansible.builtin.file', './test.xml') }}"
Type: 2
+
+- name: Create a template from a reference device with Job wait as false
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 25123
+ attributes:
+ Name: "New Template"
+ Description: "New Template description"
+ Fqdds: iDRAC,BIOS
+ job_wait: false
'''
RETURN = r'''
@@ -516,12 +544,13 @@ error_info:
'''
import json
+import time
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
-from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import apply_diff_key
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import apply_diff_key, job_tracking
TEMPLATES_URI = "TemplateService/Templates"
@@ -531,12 +560,24 @@ TEMPLATE_ATTRIBUTES = "TemplateService/Templates({template_id})/AttributeDetails
DEVICE_URI = "DeviceService/Devices"
GROUP_URI = "GroupService/Groups"
PROFILE_URI = "ProfileService/Profiles"
+JOB_URI = "JobService/Jobs({job_id})"
SEPRTR = ','
NO_CHANGES_MSG = "No changes found to be applied."
CHANGES_FOUND = "Changes found to be applied."
TEMPLATE_NAME_EXISTS = "Template with name '{name}' already exists."
DEPLOY_DEV_ASSIGNED = "The device(s) '{dev}' have been assigned the template(s) '{temp}' " \
"respectively. Please unassign the profiles from the devices."
+MSG_DICT = {'create_when_job_wait_true': "Successfully created a template with ID {0}",
+ 'create_when_job_wait_false': "Successfully submitted a template creation with job ID {0}",
+ 'modify': "Successfully modified the template with ID {0}",
+ 'deploy_when_job_wait_false': "Successfully submitted a template deployment with job ID {0}",
+ 'deploy_when_job_wait_true': "Successfully deployed the template with ID {0}",
+ 'fail': 'Failed to {command} template.',
+ 'delete': "Deleted successfully",
+ 'export': "Exported successfully",
+ 'import': "Imported successfully",
+ 'clone': "Cloned successfully",
+ 'timed_out': "Template operation is in progress. Task exited after 'job_wait_timeout'."}
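+# exit_module() below picks one of these messages by combining the command name
+# with the job_wait outcome (for example, 'create' resolves to
+# 'create_when_job_wait_true', 'create_when_job_wait_false', or 'timed_out').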
def get_profiles(rest_obj):
@@ -915,31 +956,49 @@ def fail_module(module, **failmsg):
module.fail_json(**failmsg)
-def exit_module(module, response):
+def exit_module(rest_obj, module, response, time_out=False):
password_no_log(module.params.get("attributes"))
resp = None
- my_change = True
+ changed_flag = True
command = module.params.get('command')
result = {}
if command in ["create", "modify", "deploy", "import", "clone"]:
result["return_id"] = response.json_data
resp = result["return_id"]
- if command == 'deploy' and result["return_id"] == 0:
- result["failed"] = True
- command = 'deploy_fail'
- my_change = False
+ if command == 'deploy':
+ if time_out:
+ command = 'timed_out'
+ changed_flag = False
+ elif not result["return_id"]:
+ result["failed"] = True
+ command = 'deploy_fail'
+ changed_flag = False
+ elif module.params["job_wait"]:
+ command = 'deploy_when_job_wait_true'
+ else:
+ command = 'deploy_when_job_wait_false'
+ elif command == 'create':
+ if time_out:
+ resp = get_job_id(rest_obj, resp)
+ command = 'timed_out'
+ changed_flag = False
+ elif module.params["job_wait"]:
+ command = 'create_when_job_wait_true'
+ else:
+ time.sleep(5)
+ resp = get_job_id(rest_obj, resp)
+ command = 'create_when_job_wait_false'
if command == 'export':
- my_change = False
+ changed_flag = False
result = response.json_data
- msg_dict = {'create': "Successfully created a template with ID {0}".format(resp),
- 'modify': "Successfully modified the template with ID {0}".format(resp),
- 'deploy': "Successfully created the template-deployment job with ID {0}".format(resp),
- 'deploy_fail': 'Failed to deploy template.',
- 'delete': "Deleted successfully",
- 'export': "Exported successfully",
- 'import': "Imported successfully",
- 'clone': "Cloned successfully"}
- module.exit_json(msg=msg_dict.get(command), changed=my_change, **result)
+ message = MSG_DICT.get(command).format(resp)
+ module.exit_json(msg=message, changed=changed_flag, **result)
+
+
+def get_job_id(rest_obj, template_id):
+ template = rest_obj.invoke_request("GET", TEMPLATE_PATH.format(template_id=template_id))
+ job_id = template.json_data.get("TaskId")
+ return job_id
def main():
@@ -954,6 +1013,8 @@ def main():
"device_service_tag": {"required": False, "type": 'list', "default": [], "elements": 'str'},
"device_group_names": {"required": False, "type": 'list', "default": [], "elements": 'str'},
"attributes": {"required": False, "type": 'dict'},
+ "job_wait": {"required": False, "type": "bool", "default": True},
+ "job_wait_timeout": {"required": False, "type": "int", "default": 1200}
}
specs.update(ome_auth_params)
module = AnsibleModule(
@@ -976,10 +1037,38 @@ def main():
_validate_inputs(module)
with RestOME(module.params, req_session=True) as rest_obj:
path, payload, rest_method = _get_resource_parameters(module, rest_obj)
- # module.exit_json(payload=payload, path=path)
resp = rest_obj.invoke_request(rest_method, path, data=payload)
+ job_wait = module.params["job_wait"]
+ job_id = None
+ if job_wait:
+ if module.params["command"] == "create":
+ template_id = resp.json_data
+ count = 30  # poll for the template's TaskId for up to ~30 seconds
+ sleep_time = 5  # seconds between polls
+ while count > 0:
+ try:
+ job_id = get_job_id(rest_obj, template_id)
+ if job_id:
+ break
+ time.sleep(sleep_time)
+ count = count - sleep_time
+ except HTTPError:
+ time.sleep(sleep_time)
+ count = count - sleep_time
+ continue
+ elif module.params["command"] == "deploy":
+ job_id = resp.json_data
+ if job_id:
+ job_uri = JOB_URI.format(job_id=job_id)
+ job_failed, msg, job_dict, wait_time = job_tracking(rest_obj, job_uri, max_job_wait_sec=module.params["job_wait_timeout"])
+ if job_failed:
+ if job_dict.get('LastRunStatus').get('Name') == "Running":
+ exit_module(rest_obj, module, resp, True)
+ else:
+ message = MSG_DICT.get('fail').format(command=module.params["command"])
+ module.fail_json(msg=message)
if resp.success:
- exit_module(module, resp)
+ exit_module(rest_obj, module, resp)
except HTTPError as err:
fail_module(module, msg=str(err), error_info=json.load(err))
except URLError as err:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py
index 701874f70..88a09ae95 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -37,7 +37,7 @@ requirements:
- "python >= 3.8.6"
author: "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -93,7 +93,7 @@ error_info:
import json
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ssl import SSLError
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py
index e233c5ac5..9e91a5fb3 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -37,7 +37,7 @@ requirements:
- "python >= 3.8.6"
author: "Sajna Shetty(@Sajna-Shetty)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py
index 987a8b610..c9d0bd97d 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -95,7 +95,7 @@ requirements:
author:
- "Jagadeesh N V(@jagadeeshnv)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -194,7 +194,7 @@ import json
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
NETWORK_HIERARCHY_VIEW = 4 # For Network hierarchy View in a Template
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan_info.py
new file mode 100644
index 000000000..b91a6a946
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan_info.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 7.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_template_network_vlan_info
+short_description: Retrieves the network configuration of a template
+version_added: "7.2.0"
+description:
+ - "This module retrieves the network configuration of a template on OpenManage Enterprise or OpenManage Enterprise Modular."
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ template_id:
+ description:
+ - Id of the template.
+ - This is mutually exclusive with I(template_name).
+ type: int
+ template_name:
+ description:
+ - Name of the template.
+ - This is mutually exclusive with I(template_id).
+ - "C(Note) If I(template_id) or I(template_name) option is not provided, the module retrieves network VLAN info of
+ all templates."
+ type: str
+requirements:
+ - "python >= 3.9.6"
+author:
+ - Jagadeesh N V(@jagadeeshnv)
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Retrieve network details of all templates.
+ dellemc.openmanage.ome_template_network_vlan_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Retrieve network details using template ID
+ dellemc.openmanage.ome_template_network_vlan_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_id: 1234
+
+- name: Retrieve network details using template name
+ dellemc.openmanage.ome_template_network_vlan_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_name: template1
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Status of template VLAN information retrieval.
+ returned: always
+ type: str
+ sample: "Successfully retrieved the template network VLAN information."
+vlan_info:
+ description: Information about the template network VLAN.
+ returned: success
+ type: list
+ elements: dict
+ sample: [{
+ "TemplateId": 58,
+ "TemplateName": "t2",
+ "NicBondingTechnology" : "LACP",
+ "NicModel": {
+ "NIC in Mezzanine 1B" : {
+ '1' : {"Port" : 1,
+ "Vlan Tagged" : ["25367", "32656", "32658", "26898"],
+ "Vlan UnTagged" : "21474",
+ "NICBondingEnabled" : "false"},
+ '2' : {"Port" : 2,
+ "Vlan Tagged" : [],
+ "Vlan UnTagged" : "32658",
+ "NIC Bonding Enabled" : "true"}
+ },
+ "NIC in Mezzanine 1A" : {
+ '1' : {"Port" : 1,
+ "Vlan Tagged" : ["32656", "32658"],
+ "Vlan UnTagged" : "25367",
+ "NIC Bonding Enabled" : "true"},
+ '2' : {"Port" : 2,
+ "Vlan Tagged" : ["21474"],
+ "Vlan UnTagged" : "32656",
+ "NIC Bonding Enabled" : "false"}
+ }
+ }}]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+
+NETWORK_HIERARCHY_VIEW = 4 # For Network hierarchy View in a Template
+TEMPLATE_ATTRIBUTE_VIEW = "TemplateService/Templates({0})/Views({1})/AttributeViewDetails"
+TEMPLATE_VIEW = "TemplateService/Templates"  # Append ?$top=9999 when not using a query parameter
+KEY_ATTR_NAME = 'DisplayName'
+SUB_GRP_ATTR_NAME = 'SubAttributeGroups'
+GRP_ATTR_NAME = 'Attributes'
+GRP_NAME_ID_ATTR_NAME = 'GroupNameId'
+CUSTOM_ID_ATTR_NAME = 'CustomId'
+SUCCESS_MSG = "Successfully retrieved the template network VLAN information."
+NO_TEMPLATES_MSG = "No templates with network info were found."
+
+
+def get_template_details(module, rest_obj):
+ id = module.params.get('template_id')
+ query_param = {"$filter": "Id eq {0}".format(id)}
+ srch = 'Id'
+ if not id:
+ id = module.params.get('template_name')
+ query_param = {"$filter": "Name eq '{0}'".format(id)}
+ srch = 'Name'
+ resp = rest_obj.invoke_request('GET', TEMPLATE_VIEW, query_param=query_param)
+ if resp.success and resp.json_data.get('value'):
+ tlist = resp.json_data.get('value', [])
+ for xtype in tlist:
+ if xtype.get(srch) == id:
+ return xtype
+ module.exit_json(failed=True, msg="Template with {0} '{1}' not found.".format(srch.lower(), id))
+
+
+def get_template_vlan_info(rest_obj, template_id):
+ result = {}
+ try:
+ resp = rest_obj.invoke_request('GET', TEMPLATE_ATTRIBUTE_VIEW.format(template_id, NETWORK_HIERARCHY_VIEW))
+ if resp.json_data.get('AttributeGroups', []):
+ nic_model = resp.json_data.get('AttributeGroups', [])
+ for xnic in nic_model:
+ if xnic.get(KEY_ATTR_NAME) == "NICModel":
+ nic_group = xnic.get('SubAttributeGroups', [])
+ nic_group_dict = {}
+ for nic in nic_group:
+ nic_dict = {}
+ for port in nic.get(SUB_GRP_ATTR_NAME): # ports
+ port_number = port.get(GRP_NAME_ID_ATTR_NAME)
+ port_dict = {"Port": port_number}
+ for partition in port.get(SUB_GRP_ATTR_NAME): # partitions
+ for attribute in partition.get(GRP_ATTR_NAME): # attributes
+ if attribute.get(CUSTOM_ID_ATTR_NAME) != 0:
+ if attribute.get(KEY_ATTR_NAME).lower() == "vlan untagged":
+ port_dict[attribute.get(KEY_ATTR_NAME)] = int(attribute.get("Value"))
+ if attribute.get(KEY_ATTR_NAME).lower() == "vlan tagged":
+ port_dict[attribute.get(KEY_ATTR_NAME)] = []
+ if attribute.get("Value"):
+ port_dict[attribute.get(KEY_ATTR_NAME)] = \
+ list(map(int, (attribute.get("Value")).replace(" ", "").split(",")))
+ if attribute.get(KEY_ATTR_NAME).lower() == "nic bonding enabled":
+ port_dict[attribute.get(KEY_ATTR_NAME)] = attribute.get("Value")
+ nic_dict[port_number] = port_dict
+ nic_group_dict[nic.get(KEY_ATTR_NAME)] = nic_dict
+ result[xnic.get(KEY_ATTR_NAME)] = nic_group_dict
+ if xnic.get(KEY_ATTR_NAME) == "NicBondingTechnology":
+ nic_bonding_list = xnic.get("Attributes", [])
+ for xbnd in nic_bonding_list:
+ if xbnd.get(KEY_ATTR_NAME).lower() == "nic bonding technology":
+ result[xnic.get(KEY_ATTR_NAME)] = xbnd.get('Value')
+ except Exception:
+ result = {}
+ return result
+
+
+def main():
+ argument_spec = {
+ "template_id": {"type": 'int'},
+ "template_name": {"type": 'str'}
+ }
+ argument_spec.update(ome_auth_params)
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=[('template_id', 'template_name')],
+ supports_check_mode=True)
+ try:
+ templates = []
+ with RestOME(module.params, req_session=True) as rest_obj:
+ # all_templates = True
+ if module.params.get("template_id") or module.params.get("template_name"):
+ tmplt = get_template_details(module, rest_obj)
+ templates.append(tmplt)
+ # all_templates = False
+ else:
+ resp = rest_obj.get_all_items_with_pagination(TEMPLATE_VIEW)
+ templates = resp.get("value")
+ vlan_info = []
+ for xtmp in templates:
+ if xtmp.get("ViewTypeId") != 4:
+ result = get_template_vlan_info(rest_obj, xtmp['Id'])
+ result["TemplateId"] = xtmp['Id']
+ result["TemplateName"] = xtmp['Name']
+ vlan_info.append(result)
+ # if vlan_info is not None and not all_templates:
+ module.exit_json(msg=SUCCESS_MSG, vlan_info=vlan_info)
+ # else:
+ # module.exit_json(msg=NO_TEMPLATES_MSG, failed=all_templates)
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (IOError, ValueError, SSLError, TypeError, ConnectionError,
+ AttributeError, IndexError, KeyError, OSError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
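
The vlan_info structure returned above nests ports under each NIC model, which can be awkward to post-process. A minimal sketch, using a hypothetical dict shaped like the RETURN sample, of flattening it into one row per port:

# Minimal sketch (not part of the module): flatten the vlan_info structure
# returned by ome_template_network_vlan_info into one row per NIC port.
# The sample dict below is hypothetical and only mirrors the RETURN example.

sample = {
    "TemplateId": 58,
    "TemplateName": "t2",
    "NicBondingTechnology": "LACP",
    "NicModel": {
        "NIC in Mezzanine 1A": {
            "1": {"Port": 1, "Vlan Tagged": [32656, 32658], "Vlan UnTagged": 25367,
                  "NIC Bonding Enabled": "true"},
        },
    },
}


def flatten(template_vlan_info):
    rows = []
    for nic_name, ports in template_vlan_info.get("NicModel", {}).items():
        for port_dict in ports.values():
            rows.append({
                "template": template_vlan_info.get("TemplateName"),
                "nic": nic_name,
                "port": port_dict.get("Port"),
                "untagged": port_dict.get("Vlan UnTagged"),
                "tagged": port_dict.get("Vlan Tagged", []),
                "bonding": port_dict.get("NIC Bonding Enabled"),
            })
    return rows


for row in flatten(sample):
    print(row)
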
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py
index c768b4ca5..27092a036 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -56,7 +56,7 @@ requirements:
- "python >= 3.8.6"
author: "Sajna Shetty(@Sajna-Shetty)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module does not support C(check_mode).
'''
@@ -72,7 +72,7 @@ EXAMPLES = r'''
UserName: "user1"
Password: "UserPassword"
RoleId: "10"
- Enabled: True
+ Enabled: true
- name: Create user with all parameters
dellemc.openmanage.ome_user:
@@ -85,10 +85,10 @@ EXAMPLES = r'''
Description: "user2 description"
Password: "UserPassword"
RoleId: "10"
- Enabled: True
+ Enabled: true
DirectoryServiceId: 0
UserTypeId: 1
- Locked: False
+ Locked: false
Name: "user2"
- name: Modify existing user
@@ -101,7 +101,7 @@ EXAMPLES = r'''
attributes:
UserName: "user3"
RoleId: "10"
- Enabled: True
+ Enabled: true
Description: "Modify user Description"
- name: Delete existing user using id
@@ -236,7 +236,7 @@ def main():
"choices": ['present', 'absent']},
"user_id": {"required": False, "type": 'int'},
"name": {"required": False, "type": 'str'},
- "attributes": {"required": False, "type": 'dict'},
+ "attributes": {"required": False, "type": 'dict', "default": {}},
}
specs.update(ome_auth_params)
module = AnsibleModule(
@@ -247,8 +247,6 @@ def main():
try:
_validate_inputs(module)
- if module.params.get("attributes") is None:
- module.params["attributes"] = {}
with RestOME(module.params, req_session=True) as rest_obj:
method, path, payload = _get_resource_parameters(module, rest_obj)
resp = rest_obj.invoke_request(method, path, data=payload)
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py
index b42f180fe..488444694 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -35,9 +35,9 @@ options:
type: str
requirements:
- "python >= 3.8.6"
-author: "Jagadeesh N V(@jagadeeshnv)"
+author: "Jagadeesh N V (@jagadeeshnv)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py
index c0a0fc475..c974aaccc 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+
@@ -30,7 +30,7 @@ options:
- The HTTPS URI of the destination to send events.
- HTTPS is required.
type: str
- required: True
+ required: true
event_type:
description:
- Specifies the event type to be subscribed.
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware.py
index a03ba0407..98f64f780 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.5.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -30,19 +30,38 @@ options:
- Firmware Image location URI or local path.
- For example- U(http://<web_address>/components.exe) or /home/firmware_repo/component.exe.
type: str
- required: True
+ required: true
transfer_protocol:
description: Protocol used to transfer the firmware image file. Applicable for URI based update.
type: str
default: HTTP
choices: ["CIFS", "FTP", "HTTP", "HTTPS", "NSF", "OEM", "SCP", "SFTP", "TFTP"]
+ job_wait:
+ description: Provides the option to wait for job completion.
+ type: bool
+ default: true
+ job_wait_timeout:
+ type: int
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(true).
+ - "Note: If a firmware update needs a reboot, the job will get scheduled and waits for
+ no of seconds specfied in I(job_wait_time). to reduce the wait time either give
+ I(job_wait_time) minimum or make I(job_wait)as false and retrigger."
+ default: 3600
requirements:
- "python >= 3.8.6"
- "urllib3"
author:
- "Felix Stephen (@felixs88)"
+ - "Husniya Hameed (@husniya_hameed)"
+ - "Shivam Sharma (@Shivam-Sharma)"
+ - "Kritika Bhateja (@Kritika_Bhateja)"
+ - "Abhishek Sinha (@ABHISHEK-SINHA10)"
notes:
- Run this module from a system that has direct access to Redfish APIs.
+ - This module supports both IPv4 and IPv6 addresses.
+ - This module supports only iDRAC9 and above.
- This module does not support C(check_mode).
"""
@@ -57,6 +76,17 @@ EXAMPLES = """
image_uri: "http://192.168.0.2/firmware_repo/component.exe"
transfer_protocol: "HTTP"
+- name: Update the firmware from a single executable file available in an HTTP protocol with job_wait
+ dellemc.openmanage.redfish_firmware:
+ baseuri: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ image_uri: "http://192.168.0.2/firmware_repo/component.exe"
+ transfer_protocol: "HTTP"
+ job_wait: true
+ job_wait_timeout: 600
+
- name: Update the firmware from a single executable file available in a local path
dellemc.openmanage.redfish_firmware:
baseuri: "192.168.0.1"
@@ -72,7 +102,7 @@ msg:
description: Overall status of the firmware update task.
returned: always
type: str
- sample: Successfully submitted the firmware update task.
+ sample: "Successfully updated the firmware."
task:
description: Returns ID and URI of the created task.
returned: success
@@ -112,6 +142,7 @@ error_info:
import json
import os
+import time
from ssl import SSLError
from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
@@ -126,7 +157,16 @@ except ImportError:
HAS_LIB = False
UPDATE_SERVICE = "UpdateService"
-JOB_URI = "/redfish/v1/JobService/Jobs/{job_id}"
+JOB_URI = "JobService/Jobs/{job_id}"
+JOB_WAIT_MSG = 'Job wait timed out after {0} seconds.'
+FAIL_JOB_MSG = "Firmware update failed."
+SUCCESS_JOB_MSG = "Successfully updated the firmware."
+SCHEDULE_JOB_MSG = "Successfully scheduled the firmware job."
+JOBSTATUS_SUCCESS = "success"
+JOBSTATUS_FAILED = "failed"
+JOBSTATUS_TIMED_OUT = "timed_out"
+JOBSTATUS_SCHEDULED = "scheduled"
+JOBSTATUS_ERRORED = "errored"
def _encode_form_data(payload_file):
@@ -176,8 +216,7 @@ def firmware_update(obj, module):
data, ctype = _encode_form_data(binary_payload)
headers = {"If-Match": resp_inv.headers.get("etag")}
headers.update({"Content-Type": ctype})
- upload_status = obj.invoke_request("POST", push_uri, data=data, headers=headers, dump=False,
- api_timeout=100)
+ upload_status = obj.invoke_request("POST", push_uri, data=data, headers=headers, dump=False, api_timeout=module.params["timeout"])
if upload_status.status_code == 201:
payload = {"ImageURI": upload_status.headers.get("location")}
update_status = obj.invoke_request("POST", update_uri, data=payload)
@@ -186,11 +225,51 @@ def firmware_update(obj, module):
return update_status
+def wait_for_job_completion(module, job_uri, job_wait_timeout=900, interval=30):
+ try:
+ with Redfish(module.params, req_session=False) as obj:
+ track_counter = 0
+ final_jobstatus = ""
+ job_msg = ""
+ while track_counter <= job_wait_timeout:
+ try:
+ response = obj.invoke_request("GET", "{0}{1}".format(obj.root_uri, job_uri))
+ if response.json_data.get("PercentComplete") == 100 and response.json_data.get("JobState") == "Completed":
+ if response.json_data.get("JobStatus") == "OK":
+ final_jobstatus = JOBSTATUS_SUCCESS
+ job_msg = SUCCESS_JOB_MSG
+ else:
+ final_jobstatus = JOBSTATUS_FAILED
+ job_msg = FAIL_JOB_MSG
+ break
+ track_counter += interval
+ time.sleep(interval)
+ except (HTTPError, URLError):
+ track_counter += interval
+ time.sleep(interval)
+ # TIMED OUT
+ # when job is scheduled
+ if not final_jobstatus:
+ if response.json_data.get("PercentComplete") == 0 and response.json_data.get("JobState") == "Starting":
+ final_jobstatus = JOBSTATUS_SCHEDULED
+ job_msg = SCHEDULE_JOB_MSG
+ # when job timed out
+ else:
+ job_msg = JOB_WAIT_MSG.format(job_wait_timeout)
+ final_jobstatus = JOBSTATUS_TIMED_OUT
+ except Exception as error_message:
+        job_msg = str(error_message)
+        final_jobstatus = JOBSTATUS_ERRORED
+        module.exit_json(msg=job_msg)
+ return final_jobstatus, job_msg
+
+
def main():
specs = {
"image_uri": {"required": True, "type": "str"},
- "transfer_protocol": {"type": "str", "default": "HTTP",
- "choices": ["CIFS", "FTP", "HTTP", "HTTPS", "NSF", "OEM", "SCP", "SFTP", "TFTP"]},
+ "transfer_protocol": {"type": "str", "default": "HTTP", "choices": ["CIFS", "FTP", "HTTP", "HTTPS", "NSF", "OEM", "SCP", "SFTP", "TFTP"]},
+ "job_wait": {"required": False, "type": 'bool', "default": True},
+ "job_wait_timeout": {"required": False, "type": "int", "default": 3600}
}
specs.update(redfish_auth_params)
module = AnsibleModule(
@@ -206,10 +285,21 @@ def main():
message = "Successfully submitted the firmware update task."
task_uri = status.headers.get("Location")
job_id = task_uri.split("/")[-1]
- module.exit_json(msg=message, task={"id": job_id, "uri": JOB_URI.format(job_id=job_id)}, changed=True)
- module.fail_json(msg=message, error_info=json.loads(status))
+ else:
+ module.fail_json(msg=message, error_info=json.loads(status))
+ job_wait = module.params['job_wait']
+ job_wait_timeout = module.params['job_wait_timeout']
+ if job_wait and job_wait_timeout > 0:
+ job_uri = JOB_URI.format(job_id=job_id)
+ job_resp, job_msg = wait_for_job_completion(module, job_uri, job_wait_timeout=module.params['job_wait_timeout'])
+ if job_resp == JOBSTATUS_FAILED:
+ module.exit_json(msg=job_msg, task={"id": job_id, "uri": JOB_URI.format(job_id=job_id)}, failed=True)
+ else:
+ module.exit_json(msg=job_msg, task={"id": job_id, "uri": JOB_URI.format(job_id=job_id)}, changed=True)
+ else:
+ module.exit_json(msg=message, task={"id": job_id, "uri": JOB_URI.format(job_id=job_id)}, changed=True)
except HTTPError as err:
- module.fail_json(msg=str(err), error_info=json.load(err))
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
except (RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError,
ImportError, ValueError, TypeError, IOError, AssertionError, OSError, SSLError) as e:
module.fail_json(msg=str(e))
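
The wait_for_job_completion helper added above follows a poll-until-timeout pattern: elapsed time accumulates in steps of interval, the loop exits early once the job reports 100% and Completed, and a job still sitting at 0%/Starting after the timeout is reported as scheduled rather than timed out. A minimal, standalone sketch of that pattern; fetch_state here is a hypothetical stand-in for the Redfish GET on the job URI:

import time

# Minimal sketch of the poll-until-timeout pattern used by wait_for_job_completion.
# fetch_state is a hypothetical stand-in for the Redfish GET on the job URI.

def poll_job(fetch_state, job_wait_timeout=3600, interval=30):
    elapsed = 0
    state = {}
    while elapsed <= job_wait_timeout:
        state = fetch_state()
        if state.get("PercentComplete") == 100 and state.get("JobState") == "Completed":
            return "success" if state.get("JobStatus") == "OK" else "failed"
        elapsed += interval
        time.sleep(interval)
    # Timed out: a job still at 0% / Starting is treated as scheduled for the next reboot.
    if state.get("PercentComplete") == 0 and state.get("JobState") == "Starting":
        return "scheduled"
    return "timed_out"


if __name__ == "__main__":
    # Toy stand-in that completes on the second poll.
    calls = {"n": 0}

    def fake_fetch():
        calls["n"] += 1
        if calls["n"] < 2:
            return {"PercentComplete": 20, "JobState": "Running"}
        return {"PercentComplete": 100, "JobState": "Completed", "JobStatus": "OK"}

    print(poll_job(fake_fetch, job_wait_timeout=60, interval=1))
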
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware_rollback.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware_rollback.py
new file mode 100644
index 000000000..ef93d669f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware_rollback.py
@@ -0,0 +1,358 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+---
+module: redfish_firmware_rollback
+short_description: Perform a component firmware rollback using the component name
+version_added: "8.2.0"
+description:
+  - This module allows you to roll back the firmware of different server components.
+ - Depending on the component, the firmware update is applied after an automatic or manual reboot.
+extends_documentation_fragment:
+ - dellemc.openmanage.redfish_auth_options
+options:
+ name:
+ type: str
+ required: true
+    description: The name or regular expression of the component to match. The value is case-sensitive.
+ reboot:
+ description:
+ - Reboot the server to apply the previous version of the firmware.
+ - C(true) reboots the server to rollback the firmware to the available version.
+ - C(false) schedules the rollback of firmware until the next restart.
+ - When I(reboot) is C(false), some components update immediately, and the server may reboot.
+        In that case, the module waits until the server is accessible again.
+ type: bool
+ default: true
+ reboot_timeout:
+ type: int
+    description: Wait time in seconds. The module waits for this duration until the server reboots.
+ default: 900
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to Redfish APIs.
+  - For components that do not require a reboot, the firmware rollback proceeds irrespective of
+    whether I(reboot) is C(true) or C(false).
+ - This module supports IPv4 and IPv6 addresses.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Rollback a BIOS component firmware
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "BIOS"
+
+- name: Rollback all NIC cards with a name starting with 'Broadcom Gigabit'.
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Broadcom Gigabit Ethernet.*"
+
+- name: Rollback all the component firmware except BIOS component.
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "(?!BIOS).*"
+
+- name: Rollback all the available firmware components.
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: ".*"
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall firmware rollback status.
+ returned: always
+ sample: "Successfully completed the job for firmware rollback."
+status:
+ type: list
+ description: Firmware rollback job and progress details from the iDRAC.
+ returned: success
+ sample: [{
+ "ActualRunningStartTime": "2023-08-04T12:26:55",
+ "ActualRunningStopTime": "2023-08-04T12:32:35",
+ "CompletionTime": "2023-08-04T12:32:35",
+ "Description": "Job Instance",
+ "EndTime": "TIME_NA",
+ "Id": "JID_911698303631",
+ "JobState": "Completed",
+ "JobType": "FirmwareUpdate",
+ "Message": "Job completed successfully.",
+ "MessageArgs": [],
+ "MessageId": "PR19",
+ "Name": "Firmware Rollback: Firmware",
+ "PercentComplete": 100,
+ "StartTime": "2023-08-04T12:23:50",
+ "TargetSettingsURI": null
+ }]
+error_info:
+ type: dict
+ description: Details of the HTTP error.
+ returned: on http error
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [{
+ "Message": "InstanceID value provided for the update operation is invalid",
+ "MessageArgs": [],
+ "MessageArgs@odata.count": 0,
+ "MessageId": "IDRAC.2.8.SUP024",
+ "RelatedProperties": [],
+ "RelatedProperties@odata.count": 0,
+ "Resolution": "Enumerate inventory, copy the InstanceID value and provide that value for the update operation.",
+ "Severity": "Warning"
+ }],
+ "code": "Base.1.12.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information"
+ }
+ }
+"""
+
+
+import json
+import re
+import time
+from ssl import SSLError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params, \
+ SESSION_RESOURCE_COLLECTION
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import wait_for_redfish_reboot_job, \
+ wait_for_redfish_job_complete, strip_substr_dict, MANAGER_JOB_ID_URI, RESET_UNTRACK, MANAGERS_URI, RESET_SUCCESS
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+
+
+UPDATE_SERVICE = "UpdateService"
+SYSTEM_RESOURCE_ID = "System.Embedded.1"
+NO_COMPONENTS = "There were no firmware components to rollback."
+CHANGES_FOUND = "Changes found to be applied."
+NO_CHANGES_FOUND = "No changes found to be applied."
+NOT_SUPPORTED = "The target firmware version does not support the firmware rollback."
+COMPLETED_ERROR = "The job for firmware rollback has been completed with error(s)."
+SCHEDULED_ERROR = "The job for firmware rollback has been scheduled with error(s)."
+ROLLBACK_SUCCESS = "Successfully completed the job for firmware rollback."
+ROLLBACK_SCHEDULED = "Successfully scheduled the job for firmware rollback."
+ROLLBACK_FAILED = "Failed to complete the job for firmware rollback."
+REBOOT_FAIL = "Failed to reboot the server."
+NEGATIVE_TIMEOUT_MESSAGE = "The parameter reboot_timeout value cannot be negative or zero."
+JOB_WAIT_MSG = "Task excited after waiting for {0} seconds. Check console for firmware rollback status."
+REBOOT_COMP = ["Integrated Dell Remote Access Controller"]
+
+
+def get_rollback_preview_target(redfish_obj, module):
+ action_resp = redfish_obj.invoke_request("GET", "{0}{1}".format(redfish_obj.root_uri, UPDATE_SERVICE))
+ action_attr = action_resp.json_data["Actions"]
+ update_uri = None
+ if "#UpdateService.SimpleUpdate" in action_attr:
+ update_service = action_attr.get("#UpdateService.SimpleUpdate")
+ if 'target' not in update_service:
+ module.fail_json(msg=NOT_SUPPORTED)
+ update_uri = update_service.get('target')
+ inventory_uri = action_resp.json_data.get('FirmwareInventory').get('@odata.id')
+ inventory_uri_resp = redfish_obj.invoke_request("GET", "{0}{1}".format(inventory_uri, "?$expand=*($levels=1)"),
+ api_timeout=120)
+ previous_component = list(filter(lambda d: d["Id"].startswith("Previous"), inventory_uri_resp.json_data["Members"]))
+ if not previous_component:
+ module.fail_json(msg=NO_COMPONENTS)
+ component_name = module.params["name"]
+ try:
+ component_compile = re.compile(r"^{0}$".format(component_name))
+ except Exception:
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ prev_uri, reboot_uri = {}, []
+ for each in previous_component:
+ available_comp = each["Name"]
+ available_name = re.match(component_compile, available_comp)
+ if not available_name:
+ continue
+ if available_name.group() in REBOOT_COMP:
+ reboot_uri.append(each["@odata.id"])
+ continue
+ prev_uri[each["Version"]] = each["@odata.id"]
+ if module.check_mode and (prev_uri or reboot_uri):
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif not prev_uri and not reboot_uri:
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ return list(prev_uri.values()), reboot_uri, update_uri
+
+
+def get_job_status(redfish_obj, module, job_ids, job_wait=True):
+ each_status, failed_count, js_job_msg = [], 0, ""
+ wait_timeout = module.params["reboot_timeout"]
+ for each in job_ids:
+ each_job_uri = MANAGER_JOB_ID_URI.format(each)
+ job_resp, js_job_msg = wait_for_redfish_job_complete(redfish_obj, each_job_uri, job_wait=job_wait,
+ wait_timeout=wait_timeout)
+ if job_resp and js_job_msg:
+ module.exit_json(msg=JOB_WAIT_MSG.format(wait_timeout), job_status=[strip_substr_dict(job_resp.json_data)],
+ changed=True)
+ job_status = job_resp.json_data
+ if job_status["JobState"] == "Failed":
+ failed_count += 1
+ strip_odata = strip_substr_dict(job_status)
+ each_status.append(strip_odata)
+ return each_status, failed_count
+
+
+def require_session(idrac, module):
+ session_id, token = "", None
+ payload = {'UserName': module.params["username"], 'Password': module.params["password"]}
+ path = SESSION_RESOURCE_COLLECTION["SESSION"]
+ resp = idrac.invoke_request('POST', path, data=payload, api_timeout=120)
+ if resp and resp.success:
+ session_id = resp.json_data.get("Id")
+ token = resp.headers.get('X-Auth-Token')
+ return session_id, token
+
+
+def wait_for_redfish_idrac_reset(module, redfish_obj, wait_time_sec, interval=30):
+ time.sleep(interval // 2)
+ msg = RESET_UNTRACK
+ wait = wait_time_sec
+ track_failed = True
+ resetting = False
+ while wait > 0 and track_failed:
+ try:
+ redfish_obj.invoke_request("GET", MANAGERS_URI, api_timeout=120)
+ msg = RESET_SUCCESS
+ track_failed = False
+ break
+ except HTTPError as err:
+ if err.getcode() == 401:
+ new_redfish_obj = Redfish(module.params, req_session=True)
+ sid, token = require_session(new_redfish_obj, module)
+ redfish_obj.session_id = sid
+ redfish_obj._headers.update({"X-Auth-Token": token})
+ track_failed = False
+ if not resetting:
+ resetting = True
+ break
+ time.sleep(interval)
+ wait -= interval
+ resetting = True
+ except URLError:
+ time.sleep(interval)
+ wait -= interval
+ if not resetting:
+ resetting = True
+ except Exception:
+ time.sleep(interval)
+ wait -= interval
+ resetting = True
+ return track_failed, resetting, msg
+
+
+def simple_update(redfish_obj, preview_uri, update_uri):
+ job_ids = []
+ for uri in preview_uri:
+ resp = redfish_obj.invoke_request("POST", update_uri, data={"ImageURI": uri})
+ time.sleep(30)
+ task_uri = resp.headers.get("Location")
+ task_id = task_uri.split("/")[-1]
+ job_ids.append(task_id)
+ return job_ids
+
+
+def rollback_firmware(redfish_obj, module, preview_uri, reboot_uri, update_uri):
+ current_job_status, failed_cnt, resetting = [], 0, False
+ job_ids = simple_update(redfish_obj, preview_uri, update_uri)
+ if module.params["reboot"] and preview_uri:
+ payload = {"ResetType": "ForceRestart"}
+ job_resp_status, reset_status, reset_fail = wait_for_redfish_reboot_job(redfish_obj, SYSTEM_RESOURCE_ID,
+ payload=payload)
+ if reset_status and job_resp_status:
+ job_uri = MANAGER_JOB_ID_URI.format(job_resp_status["Id"])
+ job_resp, job_msg = wait_for_redfish_job_complete(redfish_obj, job_uri)
+ job_status = job_resp.json_data
+ if job_status["JobState"] != "RebootCompleted":
+ if job_msg:
+ module.fail_json(msg=JOB_WAIT_MSG.format(module.params["reboot_timeout"]))
+ else:
+ module.fail_json(msg=REBOOT_FAIL)
+ elif not reset_status and reset_fail:
+ module.fail_json(msg=reset_fail)
+
+ current_job_status, failed = get_job_status(redfish_obj, module, job_ids, job_wait=True)
+ failed_cnt += failed
+ if not module.params["reboot"] and preview_uri:
+ current_job_status, failed = get_job_status(redfish_obj, module, job_ids, job_wait=False)
+ failed_cnt += failed
+ if reboot_uri:
+ job_ids = simple_update(redfish_obj, reboot_uri, update_uri)
+ track, resetting, js_job_msg = wait_for_redfish_idrac_reset(module, redfish_obj, 900)
+ if not track and resetting:
+ reboot_job_status, failed = get_job_status(redfish_obj, module, job_ids, job_wait=True)
+ current_job_status.extend(reboot_job_status)
+ failed_cnt += failed
+ return current_job_status, failed_cnt, resetting
+
+
+def main():
+ specs = {
+ "name": {"required": True, "type": "str"},
+ "reboot": {"type": "bool", "default": True},
+ "reboot_timeout": {"type": "int", "default": 900},
+ }
+ specs.update(redfish_auth_params)
+ module = AnsibleModule(argument_spec=specs, supports_check_mode=True)
+ if module.params["reboot_timeout"] <= 0:
+ module.fail_json(msg=NEGATIVE_TIMEOUT_MESSAGE)
+ try:
+ with Redfish(module.params, req_session=True) as redfish_obj:
+ preview_uri, reboot_uri, update_uri = get_rollback_preview_target(redfish_obj, module)
+ job_status, failed_count, resetting = rollback_firmware(redfish_obj, module, preview_uri, reboot_uri, update_uri)
+ if not job_status or (failed_count == len(job_status)):
+ module.exit_json(msg=ROLLBACK_FAILED, status=job_status, failed=True)
+ if module.params["reboot"]:
+ msg, module_fail, changed = ROLLBACK_SUCCESS, False, True
+ if failed_count > 0 and failed_count != len(job_status):
+ msg, module_fail, changed = COMPLETED_ERROR, True, False
+ else:
+ msg, module_fail, changed = ROLLBACK_SCHEDULED, False, True
+ if failed_count > 0 and failed_count != len(job_status):
+ msg, module_fail, changed = SCHEDULED_ERROR, True, False
+ elif resetting and len(job_status) == 1 and failed_count != len(job_status):
+ msg, module_fail, changed = ROLLBACK_SUCCESS, False, True
+ module.exit_json(msg=msg, job_status=job_status, failed=module_fail, changed=changed)
+ except HTTPError as err:
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
+ ImportError, ValueError, TypeError, IOError, AssertionError, OSError, SSLError) as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
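
In get_rollback_preview_target above, the name option is compiled as an anchored pattern (^name$) and matched against each rollback ("Previous") firmware inventory entry, with iDRAC firmware diverted to a separate reboot path. A minimal sketch of that selection logic, using assumed inventory names rather than a live Redfish query:

import re

# Minimal sketch (assumed inventory names, not a live Redfish query) of how the
# rollback module selects components: the name option is anchored as ^name$ and
# matched against each "Previous" firmware inventory entry.

REBOOT_COMP = ["Integrated Dell Remote Access Controller"]

previous_inventory = [
    "BIOS",
    "Broadcom Gigabit Ethernet BCM5720 - 00:00:00:00:00:01",
    "Integrated Dell Remote Access Controller",
]


def select_components(name_pattern, names):
    pattern = re.compile(r"^{0}$".format(name_pattern))
    rollback, reboot_path = [], []
    for name in names:
        if not pattern.match(name):
            continue
        # iDRAC firmware is rolled back via a separate path that resets the iDRAC.
        (reboot_path if name in REBOOT_COMP else rollback).append(name)
    return rollback, reboot_path


print(select_components("(?!BIOS).*", previous_inventory))
print(select_components("Broadcom Gigabit Ethernet.*", previous_inventory))
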
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py
index 23094b158..085bbc018 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -29,7 +29,7 @@ options:
For example- U(https://<I(baseuri)>/redfish/v1/Systems/<I(resource_id)>).
- This option is mandatory for I(base_uri) with multiple devices.
- To get the device details, use the API U(https://<I(baseuri)>/redfish/v1/Systems).
- required: False
+ required: false
type: str
reset_type:
description:
@@ -46,7 +46,7 @@ options:
- If C(PushPowerButton), Simulates the pressing of a physical power button on the device.
- When a power control operation is performed, which is not supported on the device, an error message is displayed
with the list of operations that can be performed.
- required: True
+ required: true
type: str
choices: ["ForceOff", "ForceOn", "ForceRestart", "GracefulRestart", "GracefulShutdown",
"Nmi", "On", "PowerCycle", "PushPowerButton"]
@@ -121,7 +121,7 @@ from ssl import SSLError
from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
-from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError
powerstate_map = {}
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py
index ce02b4c00..d8f0c5503 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.5.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -58,17 +58,13 @@ options:
volume_type:
description:
- One of the following volume types must be selected to create a volume.
- - >-
- C(Mirrored) The volume is a mirrored device.
- - >-
- C(NonRedundant) The volume is a non-redundant storage device.
- - >-
- C(SpannedMirrors) The volume is a spanned set of mirrored devices.
- - >-
- C(SpannedStripesWithParity) The volume is a spanned set of devices which uses parity to retain redundant
+ - C(NonRedundant) The volume is a non-redundant storage device.
+ - C(Mirrored) The volume is a mirrored device.
+ - C(StripedWithParity) The volume is a device which uses parity to retain redundant information.
+ - C(SpannedMirrors) The volume is a spanned set of mirrored devices.
+ - C(SpannedStripesWithParity) The volume is a spanned set of devices which uses parity to retain redundant
information.
- - >-
- C(StripedWithParity) The volume is a device which uses parity to retain redundant information.
+ - I(volume_type) is mutually exclusive with I(raid_type).
type: str
choices: [NonRedundant, Mirrored, StripedWithParity, SpannedMirrors, SpannedStripesWithParity]
name:
@@ -76,6 +72,7 @@ options:
- Name of the volume to be created.
- Only applicable when I(state) is C(present).
type: str
+ aliases: ['volume_name']
drives:
description:
- FQDD of the Physical disks.
@@ -125,15 +122,72 @@ options:
type: str
choices: [Fast, Slow]
default: Fast
+ raid_type:
+ description:
+ - C(RAID0) to create a RAID0 type volume.
+ - C(RAID1) to create a RAID1 type volume.
+ - C(RAID5) to create a RAID5 type volume.
+ - C(RAID6) to create a RAID6 type volume.
+ - C(RAID10) to create a RAID10 type volume.
+ - C(RAID50) to create a RAID50 type volume.
+ - C(RAID60) to create a RAID60 type volume.
+ - I(raid_type) is mutually exclusive with I(volume_type).
+ type: str
+ choices: [RAID0, RAID1, RAID5, RAID6, RAID10, RAID50, RAID60]
+ version_added: 8.3.0
+ apply_time:
+ description:
+ - Apply time of the Volume configuration.
+      - C(Immediate) applies the volume configuration on the host server immediately. This is applicable for I(job_wait).
+ - C(OnReset) allows you to apply the changes on the next reboot of the host server.
+ - I(apply_time) has a default value based on the different types of the controller.
+ For example, BOSS-S1 and BOSS-N1 controllers have a default value of I(apply_time) as C(OnReset),
+ and PERC controllers have a default value of I(apply_time) as C(Immediate).
+ type: str
+ choices: [Immediate, OnReset]
+ version_added: 8.5.0
+ reboot_server:
+ description:
+ - Reboot the server to apply the changes.
+      - I(reboot_server) is applicable only when I(apply_time) is C(OnReset) or when the default value for the apply time of the controller is C(OnReset).
+ type: bool
+ default: false
+ version_added: 8.5.0
+ force_reboot:
+ description:
+ - Reboot the server forcefully to apply the changes when the normal reboot fails.
+ - I(force_reboot) is applicable only when I(reboot_server) is C(true).
+ type: bool
+ default: false
+ version_added: 8.5.0
+ job_wait:
+ description:
+ - This parameter provides the option to wait for the job completion.
+ - This is applicable when I(apply_time) is C(Immediate).
+ - This is applicable when I(apply_time) is C(OnReset) and I(reboot_server) is C(true).
+ type: bool
+ default: false
+ version_added: 8.5.0
+ job_wait_timeout:
+ description:
+ - This parameter is the maximum wait time of I(job_wait) in seconds.
+ - This option is applicable when I(job_wait) is C(true).
+ type: int
+ default: 1200
+ version_added: 8.5.0
+
requirements:
- - "python >= 3.8.6"
-author: "Sajna Shetty(@Sajna-Shetty)"
+ - "python >= 3.9.6"
+author:
+ - "Sajna Shetty(@Sajna-Shetty)"
+ - "Kritika Bhateja(@Kritika-Bhateja-03)"
notes:
- Run this module from a system that has direct access to Redfish APIs.
- This module supports C(check_mode).
- This module always reports changes when I(name) and I(volume_id) are not specified.
Either I(name) or I(volume_id) is required to support C(check_mode).
+ - This module supports IPv4 and IPv6 addresses.
'''
EXAMPLES = r'''
@@ -167,7 +221,48 @@ EXAMPLES = r'''
controller_id: "RAID.Slot.1-1"
volume_type: "NonRedundant"
drives:
- - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+
+- name: Create a RAID0 on PERC controller on reset
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID0"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ apply_time: OnReset
+
+- name: Create a RAID0 on BOSS controller with restart
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID0"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ apply_time: OnReset
+ reboot_server: true
+
+- name: Create a RAID0 on BOSS controller with force restart
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID0"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ reboot_server: true
+ force_reboot: true
- name: Modify a volume's encryption type settings
dellemc.openmanage.redfish_storage_volume:
@@ -198,6 +293,38 @@ EXAMPLES = r'''
command: "initialize"
volume_id: "Disk.Virtual.6:RAID.Slot.1-1"
initialize_type: "Slow"
+
+- name: Create a RAID6 volume
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID6"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-3
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-4
+
+- name: Create a RAID60 volume
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID60"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-3
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-4
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-5
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-6
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-7
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-8
'''
RETURN = r'''
@@ -213,7 +340,7 @@ task:
returned: success
sample: {
"id": "JID_XXXXXXXXXXXXX",
- "uri": "/redfish/v1/TaskService/Tasks/JID_XXXXXXXXXXXXX"
+ "uri": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"
}
error_info:
type: dict
@@ -249,6 +376,8 @@ from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import MANAGER_JOB_ID_URI, wait_for_redfish_reboot_job, \
+ strip_substr_dict, wait_for_job_completion
VOLUME_INITIALIZE_URI = "{storage_base_uri}/Volumes/{volume_id}/Actions/Volume.Initialize"
@@ -257,9 +386,26 @@ CONTROLLER_URI = "{storage_base_uri}/{controller_id}"
SETTING_VOLUME_ID_URI = "{storage_base_uri}/Volumes/{volume_id}/Settings"
CONTROLLER_VOLUME_URI = "{storage_base_uri}/{controller_id}/Volumes"
VOLUME_ID_URI = "{storage_base_uri}/Volumes/{volume_id}"
+APPLY_TIME_INFO_API = CONTROLLER_URI + "/Volumes"
+REBOOT_API = "Actions/ComputerSystem.Reset"
storage_collection_map = {}
CHANGES_FOUND = "Changes found to be applied."
NO_CHANGES_FOUND = "No changes found to be applied."
+RAID_TYPE_NOT_SUPPORTED_MSG = "RAID Type {raid_type} is not supported."
+APPLY_TIME_NOT_SUPPORTED_MSG = "Apply time {apply_time} is not supported. The supported values \
+are {supported_apply_time_values}. Enter the valid values and retry the operation."
+JOB_COMPLETION = "The job is successfully completed."
+JOB_SUBMISSION = "The job is successfully submitted."
+JOB_FAILURE_PROGRESS_MSG = "Unable to complete the task initiated for creating the storage volume."
+REBOOT_FAIL = "Failed to reboot the server."
+CONTROLLER_NOT_EXIST_ERROR = "Specified Controller {controller_id} does not exist in the System."
+TIMEOUT_NEGATIVE_OR_ZERO_MSG = "The parameter job_wait_timeout value cannot be negative or zero."
+SYSTEM_ID = "System.Embedded.1"
+volume_type_map = {"NonRedundant": "RAID0",
+ "Mirrored": "RAID1",
+ "StripedWithParity": "RAID5",
+ "SpannedMirrors": "RAID10",
+ "SpannedStripesWithParity": "RAID50"}
def fetch_storage_resource(module, session_obj):
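
The constants added above replace the legacy VolumeType payload key with RAIDType: a volume_type value is translated through volume_type_map, a raid_type value is passed through unchanged, and apply_time is sent as @Redfish.OperationApplyTime. A simplified sketch of that translation (the real volume_payload also builds the Drives links, capacity, and encryption settings):

# Simplified sketch of the RAIDType translation done in volume_payload; the real
# function also builds Drives links, capacity, and encryption settings.

volume_type_map = {"NonRedundant": "RAID0",
                   "Mirrored": "RAID1",
                   "StripedWithParity": "RAID5",
                   "SpannedMirrors": "RAID10",
                   "SpannedStripesWithParity": "RAID50"}


def build_raid_payload(name, volume_type=None, raid_type=None, apply_time=None):
    payload = {"Name": name}
    if volume_type:
        # Legacy option: translate the older volume_type value to a RAIDType.
        payload["RAIDType"] = volume_type_map.get(volume_type)
    if raid_type:
        payload["RAIDType"] = raid_type
    if apply_time is not None:
        payload["@Redfish.OperationApplyTime"] = apply_time
    return payload


print(build_raid_payload("vol1", volume_type="Mirrored"))
print(build_raid_payload("vol2", raid_type="RAID6", apply_time="OnReset"))
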
@@ -269,6 +415,7 @@ def fetch_storage_resource(module, session_obj):
system_members = system_resp.json_data.get("Members")
if system_members:
system_id_res = system_members[0]["@odata.id"]
+ SYSTEM_ID = system_id_res.split('/')[-1]
system_id_res_resp = session_obj.invoke_request("GET", system_id_res)
system_id_res_data = system_id_res_resp.json_data.get("Storage")
if system_id_res_data:
@@ -294,16 +441,17 @@ def volume_payload(module):
oem = params.get("oem")
encrypted = params.get("encrypted")
encryption_types = params.get("encryption_types")
+ volume_type = params.get("volume_type")
+ raid_type = params.get("raid_type")
+ apply_time = params.get("apply_time")
if capacity_bytes:
capacity_bytes = int(capacity_bytes)
if drives:
storage_base_uri = storage_collection_map["storage_base_uri"]
physical_disks = [{"@odata.id": DRIVES_URI.format(storage_base_uri=storage_base_uri,
driver_id=drive_id)} for drive_id in drives]
-
raid_mapper = {
"Name": params.get("name"),
- "VolumeType": params.get("volume_type"),
"BlockSizeBytes": params.get("block_size_bytes"),
"CapacityBytes": capacity_bytes,
"OptimumIOSizeBytes": params.get("optimum_io_size_bytes"),
@@ -316,7 +464,12 @@ def volume_payload(module):
raid_payload.update({"Encrypted": encrypted})
if encryption_types:
raid_payload.update({"EncryptionTypes": [encryption_types]})
-
+ if volume_type:
+ raid_payload.update({"RAIDType": volume_type_map.get(volume_type)})
+ if raid_type:
+ raid_payload.update({"RAIDType": raid_type})
+ if apply_time is not None:
+ raid_payload.update({"@Redfish.OperationApplyTime": apply_time})
return raid_payload
@@ -353,9 +506,7 @@ def check_specified_identifier_exists_in_the_system(module, session_obj, uri, er
return resp
except HTTPError as err:
if err.code == 404:
- if module.check_mode:
- return err
- module.fail_json(msg=err_message)
+ module.exit_json(msg=err_message, failed=True)
raise err
except (URLError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
raise err
@@ -367,8 +518,7 @@ def check_controller_id_exists(module, session_obj):
"""
specified_controller_id = module.params.get("controller_id")
uri = CONTROLLER_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"], controller_id=specified_controller_id)
- err_message = "Specified Controller {0} does " \
- "not exist in the System.".format(specified_controller_id)
+ err_message = CONTROLLER_NOT_EXIST_ERROR.format(controller_id=specified_controller_id)
resp = check_specified_identifier_exists_in_the_system(module, session_obj, uri, err_message)
if resp.success:
return check_physical_disk_exists(module, resp.json_data["Drives"])
@@ -420,6 +570,7 @@ def check_mode_validation(module, session_obj, action, uri):
encryption_types = module.params.get("encryption_types")
encrypted = module.params.get("encrypted")
volume_type = module.params.get("volume_type")
+ raid_type = module.params.get("raid_type")
drives = module.params.get("drives")
if name is None and volume_id is None and module.check_mode:
module.exit_json(msg=CHANGES_FOUND, changed=True)
@@ -444,12 +595,12 @@ def check_mode_validation(module, session_obj, action, uri):
exist_value = {"Name": resp_data["Name"], "BlockSizeBytes": resp_data["BlockSizeBytes"],
"CapacityBytes": resp_data["CapacityBytes"], "Encrypted": resp_data["Encrypted"],
"EncryptionTypes": resp_data["EncryptionTypes"][0],
- "OptimumIOSizeBytes": resp_data["OptimumIOSizeBytes"], "VolumeType": resp_data["VolumeType"]}
+ "OptimumIOSizeBytes": resp_data["OptimumIOSizeBytes"], "RAIDType": resp_data["RAIDType"]}
exit_value_filter = dict([(k, v) for k, v in exist_value.items() if v is not None])
cp_exist_value = copy.deepcopy(exit_value_filter)
req_value = {"Name": name, "BlockSizeBytes": block_size_bytes,
"Encrypted": encrypted, "OptimumIOSizeBytes": optimum_io_size_bytes,
- "VolumeType": volume_type, "EncryptionTypes": encryption_types}
+ "RAIDType": raid_type, "EncryptionTypes": encryption_types}
if capacity_bytes is not None:
req_value["CapacityBytes"] = int(capacity_bytes)
req_value_filter = dict([(k, v) for k, v in req_value.items() if v is not None])
@@ -469,12 +620,63 @@ def check_mode_validation(module, session_obj, action, uri):
return None
+def check_raid_type_supported(module, session_obj):
+ volume_type = module.params.get("volume_type")
+ if volume_type:
+ raid_type = volume_type_map.get(volume_type)
+ else:
+ raid_type = module.params.get("raid_type")
+ if raid_type:
+ try:
+ specified_controller_id = module.params.get("controller_id")
+ uri = CONTROLLER_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"], controller_id=specified_controller_id)
+ resp = session_obj.invoke_request("GET", uri)
+ supported_raid_types = resp.json_data['StorageControllers'][0]['SupportedRAIDTypes']
+ if raid_type not in supported_raid_types:
+ module.exit_json(msg=RAID_TYPE_NOT_SUPPORTED_MSG.format(raid_type=raid_type), failed=True)
+ except (HTTPError, URLError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ raise err
+
+
+def get_apply_time(module, session_obj, controller_id):
+ """
+    Gets the apply time from the user if provided; otherwise, fetches it from the server.
+ """
+ apply_time = module.params.get("apply_time")
+ try:
+ uri = APPLY_TIME_INFO_API.format(storage_base_uri=storage_collection_map["storage_base_uri"], controller_id=controller_id)
+ resp = session_obj.invoke_request("GET", uri)
+ supported_apply_time_values = resp.json_data['@Redfish.OperationApplyTimeSupport']['SupportedValues']
+ if apply_time:
+ if apply_time not in supported_apply_time_values:
+ module.exit_json(msg=APPLY_TIME_NOT_SUPPORTED_MSG.format(apply_time=apply_time, supported_apply_time_values=supported_apply_time_values),
+ failed=True)
+ else:
+ apply_time = supported_apply_time_values[0]
+ return apply_time
+ except (HTTPError, URLError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ raise err
+
+
+def check_apply_time_supported_and_reboot_required(module, session_obj, controller_id):
+ """
+    Checks whether the apply time is supported and whether a reboot operation is required.
+ """
+ apply_time = get_apply_time(module, session_obj, controller_id)
+ reboot_server = module.params.get("reboot_server")
+ if reboot_server and apply_time == "OnReset":
+ return True
+ return False
+
+
def perform_volume_create_modify(module, session_obj):
"""
perform volume creation and modification for state present
"""
specified_controller_id = module.params.get("controller_id")
volume_id = module.params.get("volume_id")
+ check_raid_type_supported(module, session_obj)
+ action, uri, method = None, None, None
if specified_controller_id is not None:
check_controller_id_exists(module, session_obj)
uri = CONTROLLER_VOLUME_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"],
@@ -583,6 +785,76 @@ def validate_inputs(module):
" volume_id must be specified to perform further actions.")
+def perform_force_reboot(module, session_obj):
+ payload = {"ResetType": "ForceRestart"}
+ job_resp_status, reset_status, reset_fail = wait_for_redfish_reboot_job(session_obj, SYSTEM_ID, payload=payload)
+ if reset_status and job_resp_status:
+ job_uri = MANAGER_JOB_ID_URI.format(job_resp_status["Id"])
+ resp, msg = wait_for_job_completion(session_obj, job_uri, wait_timeout=module.params.get("job_wait_timeout"))
+ if resp:
+ job_data = strip_substr_dict(resp.json_data)
+ if job_data["JobState"] == "Failed":
+ module.exit_json(msg=REBOOT_FAIL, job_status=job_data, failed=True)
+ else:
+ resp = session_obj.invoke_request("GET", job_uri)
+ job_data = strip_substr_dict(resp.json_data)
+ module.exit_json(msg=msg, job_status=job_data)
+
+
+def perform_reboot(module, session_obj):
+ payload = {"ResetType": "GracefulRestart"}
+ force_reboot = module.params.get("force_reboot")
+ job_resp_status, reset_status, reset_fail = wait_for_redfish_reboot_job(session_obj, SYSTEM_ID, payload=payload)
+ if reset_status and job_resp_status:
+ job_uri = MANAGER_JOB_ID_URI.format(job_resp_status["Id"])
+ resp, msg = wait_for_job_completion(session_obj, job_uri, wait_timeout=module.params.get("job_wait_timeout"))
+ if resp:
+ job_data = strip_substr_dict(resp.json_data)
+ if force_reboot and job_data["JobState"] == "Failed":
+ perform_force_reboot(module, session_obj)
+ else:
+ resp = session_obj.invoke_request("GET", job_uri)
+ job_data = strip_substr_dict(resp.json_data)
+ module.exit_json(msg=msg, job_status=job_data)
+
+
+def check_job_tracking_required(module, session_obj, reboot_required, controller_id):
+ job_wait = module.params.get("job_wait")
+ apply_time = None
+ if controller_id:
+ apply_time = get_apply_time(module, session_obj, controller_id)
+ if job_wait:
+ if apply_time == "OnReset" and not reboot_required:
+ return False
+ return True
+ return False
+
+
+def track_job(module, session_obj, job_id, job_url):
+ resp, msg = wait_for_job_completion(session_obj, job_url,
+ wait_timeout=module.params.get("job_wait_timeout"))
+ if resp:
+ job_data = strip_substr_dict(resp.json_data)
+ if job_data["JobState"] == "Failed":
+ changed, failed = False, True
+ module.exit_json(msg=JOB_FAILURE_PROGRESS_MSG, task={"id": job_id, "uri": job_url},
+ changed=changed, job_status=job_data, failed=failed)
+ elif job_data["JobState"] == "Scheduled":
+ task_status = {"uri": job_url, "id": job_id}
+ module.exit_json(msg=JOB_SUBMISSION, task=task_status, job_status=job_data, changed=True)
+ else:
+ changed, failed = True, False
+ module.exit_json(msg=JOB_COMPLETION, task={"id": job_id, "uri": job_url},
+ changed=changed, job_status=job_data, failed=failed)
+ else:
+ module.exit_json(msg=msg)
+
+
+def validate_negative_job_time_out(module):
+ if module.params.get("job_wait") and module.params.get("job_wait_timeout") <= 0:
+ module.exit_json(msg=TIMEOUT_NEGATIVE_OR_ZERO_MSG, failed=True)
+
+
def main():
specs = {
"state": {"type": "str", "required": False, "choices": ['present', 'absent']},
@@ -591,7 +863,10 @@ def main():
"choices": ['NonRedundant', 'Mirrored',
'StripedWithParity', 'SpannedMirrors',
'SpannedStripesWithParity']},
- "name": {"required": False, "type": "str"},
+ "raid_type": {"type": "str", "required": False,
+ "choices": ['RAID0', 'RAID1', 'RAID5',
+ 'RAID6', 'RAID10', 'RAID50', 'RAID60']},
+ "name": {"required": False, "type": "str", "aliases": ['volume_name']},
"controller_id": {"required": False, "type": "str"},
"drives": {"elements": "str", "required": False, "type": "list"},
"block_size_bytes": {"required": False, "type": "int"},
@@ -603,13 +878,18 @@ def main():
"volume_id": {"required": False, "type": "str"},
"oem": {"required": False, "type": "dict"},
"initialize_type": {"type": "str", "required": False, "choices": ['Fast', 'Slow'], "default": "Fast"},
+ "apply_time": {"required": False, "type": "str", "choices": ['Immediate', 'OnReset']},
+ "reboot_server": {"required": False, "type": "bool", "default": False},
+ "force_reboot": {"required": False, "type": "bool", "default": False},
+ "job_wait": {"required": False, "type": "bool", "default": False},
+ "job_wait_timeout": {"required": False, "type": "int", "default": 1200}
}
specs.update(redfish_auth_params)
module = AnsibleModule(
argument_spec=specs,
- mutually_exclusive=[['state', 'command']],
+ mutually_exclusive=[['state', 'command'], ['volume_type', 'raid_type']],
required_one_of=[['state', 'command']],
required_if=[['command', 'initialize', ['volume_id']],
['state', 'absent', ['volume_id']], ],
@@ -617,16 +897,42 @@ def main():
try:
validate_inputs(module)
+ validate_negative_job_time_out(module)
with Redfish(module.params, req_session=True) as session_obj:
fetch_storage_resource(module, session_obj)
+ controller_id = module.params.get("controller_id")
+ volume_id = module.params.get("volume_id")
+ reboot_server = module.params.get("reboot_server")
+ reboot_required = module.params.get("reboot_required")
+ if controller_id:
+ uri = CONTROLLER_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"], controller_id=controller_id)
+ resp = check_specified_identifier_exists_in_the_system(module, session_obj, uri, CONTROLLER_NOT_EXIST_ERROR.format(controller_id=controller_id))
+ reboot_required = check_apply_time_supported_and_reboot_required(module, session_obj, controller_id)
status_message = configure_raid_operation(module, session_obj)
- task_status = {"uri": status_message.get("task_uri"), "id": status_message.get("task_id")}
- module.exit_json(msg=status_message["msg"], task=task_status, changed=True)
+ if volume_id and reboot_server:
+ controller_id = volume_id.split(":")[-1]
+ uri = CONTROLLER_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"], controller_id=controller_id)
+ resp = check_specified_identifier_exists_in_the_system(module, session_obj, uri, CONTROLLER_NOT_EXIST_ERROR.format(controller_id=controller_id))
+ reboot_required = check_apply_time_supported_and_reboot_required(module, session_obj, controller_id)
+ if reboot_required:
+ perform_reboot(module, session_obj)
+ job_tracking_required = check_job_tracking_required(module, session_obj, reboot_required, controller_id)
+ job_id = status_message.get("task_id")
+ job_url = MANAGER_JOB_ID_URI.format(job_id)
+ if job_tracking_required:
+ track_job(module, session_obj, job_id, job_url)
+ else:
+ task_status = {"uri": job_url, "id": job_id}
+ resp = session_obj.invoke_request("GET", job_url)
+ job_data = strip_substr_dict(resp.json_data)
+ module.exit_json(msg=status_message["msg"], task=task_status, job_status=job_data, changed=True)
except HTTPError as err:
- module.fail_json(msg=str(err), error_info=json.load(err))
- except (URLError, SSLValidationError, ConnectionError, ImportError, ValueError,
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, ImportError, ValueError,
RuntimeError, TypeError, OSError, SSLError) as err:
- module.fail_json(msg=str(err))
+ module.exit_json(msg=str(err), failed=True)
if __name__ == '__main__':