summaryrefslogtreecommitdiffstats
path: root/ansible_collections/dellemc/openmanage/plugins
diff options
context:
space:
mode:
Diffstat (limited to 'ansible_collections/dellemc/openmanage/plugins')
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/README.md100
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/doc_fragments/__init__.py0
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py55
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py36
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py54
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py54
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py54
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py50
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/module_utils/__init__.py0
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py104
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py377
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py399
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/module_utils/redfish.py219
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py350
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/__init__.py0
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_eventing.py342
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py394
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_firmware_inventory.py148
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_system_inventory.py141
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py224
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py505
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/dellemc_system_lockdown_mode.py216
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py524
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py820
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_boot.py563
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py521
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py651
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py144
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py131
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py134
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py223
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py117
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py444
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py165
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py773
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py132
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py666
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_syslog.py202
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_system_info.py120
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py259
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_user.py429
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py468
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py457
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py265
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py260
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py212
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py669
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py751
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py254
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py384
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py264
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py196
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py360
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_chassis_slots.py611
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_baseline.py842
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py244
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py526
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_device_info.py433
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py481
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py302
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py778
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py398
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py341
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py674
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py445
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_diagnostics.py518
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_discovery.py1067
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py344
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py653
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py550
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py420
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py155
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py644
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py452
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py603
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py210
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py283
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan.py349
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py263
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_powerstate.py277
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py863
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py262
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profiles.py425
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric.py735
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py544
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py993
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py193
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py168
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py448
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py264
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py169
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py335
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware.py219
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py263
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py633
95 files changed, 34757 insertions, 0 deletions
diff --git a/ansible_collections/dellemc/openmanage/plugins/README.md b/ansible_collections/dellemc/openmanage/plugins/README.md
new file mode 100644
index 000000000..e5200a2da
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/README.md
@@ -0,0 +1,100 @@
+# dellemc.openmanage collections Plugins Directory
+
+Here is the list of modules and module_utils supported by Dell.
+
+```
+├── doc_fragments
+ ├── idrac_auth_options.py
+ ├── network_share_options.py
+ ├── ome_auth_options.py
+ ├── omem_auth_options.py
+ ├── oment_auth_options.py
+ └── redfish_auth_options.py
+├── module_utils
+ ├── dellemc_idrac.py
+ ├── idrac_redfish.py
+ ├── ome.py
+ ├── redfish.py
+ └── utils.py
+└── modules
+ ├── dellemc_configure_idrac_eventing.py
+ ├── dellemc_configure_idrac_services.py
+ ├── dellemc_get_firmware_inventory.py
+ ├── dellemc_get_system_inventory.py
+ ├── dellemc_idrac_lc_attributes.py
+ ├── dellemc_idrac_storage_volume.py
+ ├── dellemc_system_lockdown_mode.py
+ ├── idrac_attributes.py
+ ├── idrac_bios.py
+ ├── idrac_boot.py
+ ├── idrac_certificates.py
+ ├── idrac_firmware.py
+ ├── idrac_firmware_info.py
+ ├── idrac_lifecycle_controller_job_status_info.py
+ ├── idrac_lifecycle_controller_jobs.py
+ ├── idrac_lifecycle_controller_logs.py
+ ├── idrac_lifecycle_controller_status_info.py
+ ├── idrac_network.py
+ ├── idrac_os_deployment.py
+ ├── idrac_redfish_storage_controller.py
+ ├── idrac_reset.py
+ ├── idrac_server_config_profile.py
+ ├── idrac_syslog.py
+ ├── idrac_system_info.py
+ ├── idrac_timezone_ntp.py
+ ├── idrac_user.py
+ ├── idrac_virtual_media.py
+ ├── ome_active_directory.py
+ ├── ome_application_alerts_smtp.py
+ ├── ome_application_alerts_syslog.py
+ ├── ome_application_certificate.py
+ ├── ome_application_console_preferences.py
+ ├── ome_application_network_address.py
+ ├── ome_application_network_proxy.py
+ ├── ome_application_network_settings.py
+ ├── ome_application_network_time.py
+ ├── ome_application_network_webserver.py
+ ├── ome_application_security_settings.py
+ ├── ome_chassis_slots.py
+ ├── ome_configuration_compliance_baseline.py
+ ├── ome_configuration_compliance_info.py
+ ├── ome_device_group.py
+ ├── ome_device_info.py
+ ├── ome_device_local_access_configuration.py
+ ├── ome_device_location.py
+ ├── ome_device_mgmt_network.py
+ ├── ome_device_network_services.py
+ ├── ome_device_power_settings.py
+ ├── ome_device_quick_deploy.py
+ ├── ome_devices.py
+ ├── ome_diagnostics.py
+ ├── ome_discovery.py
+ ├── ome_domain_user_groups.py
+ ├── ome_firmware.py
+ ├── ome_firmware_baseline.py
+ ├── ome_firmware_baseline_compliance_info.py
+ ├── ome_firmware_baseline_info.py
+ ├── ome_firmware_catalog.py
+ ├── ome_groups.py
+ ├── ome_identity_pool.py
+ ├── ome_job_info.py
+ ├── ome_network_port_breakout.py
+ ├── ome_network_vlan.py
+ ├── ome_network_vlan_info.py
+ ├── ome_powerstate.py
+ ├── ome_profile.py
+ ├── ome_server_interface_profile_info.py
+ ├── ome_server_interface_profiles.py
+ ├── ome_smart_fabric.py
+ ├── ome_smart_fabric_uplink.py
+ ├── ome_template.py
+ ├── ome_template_identity_pool.py
+ ├── ome_template_info.py
+ ├── ome_template_network_vlan.py
+ ├── ome_user.py
+ ├── ome_user_info.py
+ ├── redfish_event_subscription.py
+ ├── redfish_firmware.py
+ ├── redfish_powerstate.py
+ └── redfish_storage_volume.py
+``` \ No newline at end of file
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/__init__.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/__init__.py
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py
new file mode 100644
index 000000000..5ca16d6d7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ idrac_ip:
+ required: True
+ type: str
+ description: iDRAC IP Address.
+ idrac_user:
+ required: True
+ type: str
+ description: iDRAC username.
+ idrac_password:
+ required: True
+ type: str
+ description: iDRAC user password.
+ aliases: ['idrac_pwd']
+ idrac_port:
+ type: int
+ description: iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(False), the SSL certificates will not be validated.
+ - Configure C(False) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ type: bool
+ default: True
+ version_added: 5.0.0
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ version_added: 5.0.0
+ timeout:
+ description: The socket level timeout in seconds.
+ type: int
+ default: 30
+ version_added: 5.0.0
+'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py
new file mode 100644
index 000000000..f0ebb7e3a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 3.0.0
+# Copyright (C) 2020-2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ share_name:
+ required: True
+ type: str
+ description: Network share or a local path.
+ share_user:
+ type: str
+ description: Network share user name. Use the format 'user@domain' or 'domain\\user' if user is part of a domain.
+ This option is mandatory for CIFS share.
+ share_password:
+ type: str
+ description: Network share user password. This option is mandatory for CIFS share.
+ aliases: ['share_pwd']
+ share_mnt:
+ type: str
+    description: Local mount path of the network share with read-write permission for the ansible user.
+ This option is mandatory for network shares.
+'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py
new file mode 100644
index 000000000..b84c50d55
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ hostname:
+ description: OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname.
+ type: str
+ required: True
+ username:
+ description: OpenManage Enterprise or OpenManage Enterprise Modular username.
+ type: str
+ required: True
+ password:
+ description: OpenManage Enterprise or OpenManage Enterprise Modular password.
+ type: str
+ required: True
+ port:
+ description: OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port.
+ type: int
+ default: 443
+ validate_certs:
+ description:
+ - If C(False), the SSL certificates will not be validated.
+ - Configure C(False) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ type: bool
+ default: True
+ version_added: 5.0.0
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ version_added: 5.0.0
+ timeout:
+ description: The socket level timeout in seconds.
+ type: int
+ default: 30
+ version_added: 5.0.0
+'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py
new file mode 100644
index 000000000..d8c616b2a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ hostname:
+ description: OpenManage Enterprise Modular IP address or hostname.
+ type: str
+ required: True
+ username:
+ description: OpenManage Enterprise Modular username.
+ type: str
+ required: True
+ password:
+ description: OpenManage Enterprise Modular password.
+ type: str
+ required: True
+ port:
+ description: OpenManage Enterprise Modular HTTPS port.
+ type: int
+ default: 443
+ validate_certs:
+ description:
+ - If C(False), the SSL certificates will not be validated.
+ - Configure C(False) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ type: bool
+ default: True
+ version_added: 5.0.0
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ version_added: 5.0.0
+ timeout:
+ description: The socket level timeout in seconds.
+ type: int
+ default: 30
+ version_added: 5.0.0
+'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py
new file mode 100644
index 000000000..85b1553f7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ hostname:
+ description: OpenManage Enterprise IP address or hostname.
+ type: str
+ required: True
+ username:
+ description: OpenManage Enterprise username.
+ type: str
+ required: True
+ password:
+ description: OpenManage Enterprise password.
+ type: str
+ required: True
+ port:
+ description: OpenManage Enterprise HTTPS port.
+ type: int
+ default: 443
+ validate_certs:
+ description:
+ - If C(False), the SSL certificates will not be validated.
+ - Configure C(False) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ type: bool
+ default: True
+ version_added: 5.0.0
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ version_added: 5.0.0
+ timeout:
+ description: The socket level timeout in seconds.
+ type: int
+ default: 30
+ version_added: 5.0.0
+'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py
new file mode 100644
index 000000000..8eb1eda15
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ baseuri:
+ description: "IP address of the target out-of-band controller. For example- <ipaddress>:<port>."
+ type: str
+ required: True
+ username:
+ description: Username of the target out-of-band controller.
+ type: str
+ required: True
+ password:
+ description: Password of the target out-of-band controller.
+ type: str
+ required: True
+ validate_certs:
+ description:
+ - If C(False), the SSL certificates will not be validated.
+ - Configure C(False) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ type: bool
+ default: True
+ version_added: 5.0.0
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ version_added: 5.0.0
+ timeout:
+ description: The socket level timeout in seconds.
+ type: int
+ default: 30
+ version_added: 5.0.0
+'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/__init__.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/__init__.py
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py
new file mode 100644
index 000000000..fee5339c5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import os
+try:
+ from omsdk.sdkinfra import sdkinfra
+ from omsdk.sdkcreds import UserCredentials
+ from omsdk.sdkfile import FileOnShare, file_share_manager
+ from omsdk.sdkprotopref import ProtoPreference, ProtocolEnum
+ from omsdk.http.sdkwsmanbase import WsManOptions
+ HAS_OMSDK = True
+except ImportError:
+ HAS_OMSDK = False
+
+
+idrac_auth_params = {
+ "idrac_ip": {"required": True, "type": 'str'},
+ "idrac_user": {"required": True, "type": 'str'},
+ "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True},
+ "idrac_port": {"required": False, "default": 443, "type": 'int'},
+ "validate_certs": {"type": "bool", "default": True},
+ "ca_path": {"type": "path"},
+ "timeout": {"type": "int", "default": 30},
+}
+
+
+class iDRACConnection:
+
+ def __init__(self, module_params):
+ if not HAS_OMSDK:
+ raise ImportError("Dell EMC OMSDK library is required for this module")
+ self.idrac_ip = module_params['idrac_ip']
+ self.idrac_user = module_params['idrac_user']
+ self.idrac_pwd = module_params['idrac_password']
+ self.idrac_port = module_params['idrac_port']
+ if not all((self.idrac_ip, self.idrac_user, self.idrac_pwd)):
+ raise ValueError("hostname, username and password required")
+ self.handle = None
+ self.creds = UserCredentials(self.idrac_user, self.idrac_pwd)
+ self.validate_certs = module_params.get("validate_certs", False)
+ self.ca_path = module_params.get("ca_path")
+ verify_ssl = False
+ if self.validate_certs is True:
+ if self.ca_path is None:
+ self.ca_path = self._get_omam_ca_env()
+ verify_ssl = self.ca_path
+ timeout = module_params.get("timeout", 30)
+ if not timeout or type(timeout) != int:
+ timeout = 30
+ self.pOp = WsManOptions(port=self.idrac_port, read_timeout=timeout, verify_ssl=verify_ssl)
+ self.sdk = sdkinfra()
+ if self.sdk is None:
+ msg = "Could not initialize iDRAC drivers."
+ raise RuntimeError(msg)
+
+ def __enter__(self):
+ self.sdk.importPath()
+ protopref = ProtoPreference(ProtocolEnum.WSMAN)
+ protopref.include_only(ProtocolEnum.WSMAN)
+ self.handle = self.sdk.get_driver(self.sdk.driver_enum.iDRAC, self.idrac_ip, self.creds,
+ protopref=protopref, pOptions=self.pOp)
+ if self.handle is None:
+ msg = "Unable to communicate with iDRAC {0}. This may be due to one of the following: " \
+ "Incorrect username or password, unreachable iDRAC IP or " \
+ "a failure in TLS/SSL handshake.".format(self.idrac_ip)
+ raise RuntimeError(msg)
+ return self.handle
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.handle.disconnect()
+ return False
+
+ def _get_omam_ca_env(self):
+ """Check if the value is set in REQUESTS_CA_BUNDLE or CURL_CA_BUNDLE or OMAM_CA_BUNDLE or True as ssl has to
+ be validated from omsdk with single param and is default to false in omsdk"""
+ return (os.environ.get("REQUESTS_CA_BUNDLE") or os.environ.get("CURL_CA_BUNDLE")
+ or os.environ.get("OMAM_CA_BUNDLE") or True)
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py
new file mode 100644
index 000000000..168c8277d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py
@@ -0,0 +1,377 @@
+# -*- coding: utf-8 -*-
+
+# Dell EMC OpenManage Ansible Modules
+# Version 5.5.0
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import re
+import time
+import os
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
# Common Ansible argument spec for iDRAC connection/authentication options;
# modules merge this dict into their own argument_spec.
idrac_auth_params = {
    "idrac_ip": {"required": True, "type": 'str'},
    "idrac_user": {"required": True, "type": 'str'},
    "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True},
    "idrac_port": {"required": False, "default": 443, "type": 'int'},
    "validate_certs": {"type": "bool", "default": True},
    "ca_path": {"type": "path"},
    "timeout": {"type": "int", "default": 30},

}

# Redfish session collection endpoints: SESSION is POSTed to for login,
# SESSION_ID (formatted with the session Id) is DELETEd on logout.
SESSION_RESOURCE_COLLECTION = {
    "SESSION": "/redfish/v1/Sessions",
    "SESSION_ID": "/redfish/v1/Sessions/{Id}",
}
# iDRAC manager resource plus the Dell OEM Server Configuration Profile (SCP)
# export / import / import-preview action URIs.
MANAGER_URI = "/redfish/v1/Managers/iDRAC.Embedded.1"
EXPORT_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Actions/Oem/EID_674_Manager.ExportSystemConfiguration"
IMPORT_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Actions/Oem/EID_674_Manager.ImportSystemConfiguration"
IMPORT_PREVIEW = "/redfish/v1/Managers/iDRAC.Embedded.1/Actions/Oem/EID_674_Manager.ImportSystemConfigurationPreview"
+
+
class OpenURLResponse(object):
    """Wrapper around the HTTP response object returned by ``open_url``.

    The body is read eagerly in the constructor because the underlying
    handle may be closed by the time the caller inspects the response.
    """

    def __init__(self, resp):
        self.body = None
        self.resp = resp
        if self.resp:
            self.body = self.resp.read()

    @property
    def json_data(self):
        """Return the response body parsed as JSON.

        :raises ValueError: if the body is not valid JSON.
        """
        try:
            return json.loads(self.body)
        except ValueError:
            raise ValueError("Unable to parse json")

    @property
    def status_code(self):
        return self.resp.getcode()

    @property
    def success(self):
        # BUGFIX: the original expression ``status >= 200 & status <= 299``
        # applied bitwise AND (which binds tighter than the comparisons), so
        # e.g. 404 and 199 were reported as success.  Use a chained
        # comparison to test the real 2xx range.
        return 200 <= self.status_code <= 299

    @property
    def headers(self):
        return self.resp.headers

    @property
    def reason(self):
        return self.resp.reason
+
+
class iDRACRedfishAPI(object):
    """REST api for iDRAC modules.

    Thin wrapper over ``open_url`` providing basic/session authentication,
    URL construction, Server Configuration Profile (SCP) export/import
    helpers and job polling for the iDRAC Redfish service.
    """

    def __init__(self, module_params, req_session=False):
        """
        :param module_params: module parameters (see ``idrac_auth_params``).
        :param req_session: when True, ``__enter__`` creates an X-Auth-Token
            session instead of sending basic auth with every request.
        """
        self.ipaddress = module_params['idrac_ip']
        self.username = module_params['idrac_user']
        self.password = module_params['idrac_password']
        self.port = module_params['idrac_port']
        self.validate_certs = module_params.get("validate_certs", False)
        self.ca_path = module_params.get("ca_path")
        self.timeout = module_params.get("timeout", 30)
        self.use_proxy = module_params.get("use_proxy", True)
        self.req_session = req_session
        self.session_id = None
        self.protocol = 'https'
        self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}

    def _get_url(self, uri):
        """Prefix *uri* with this iDRAC's protocol, address and port."""
        return "{0}://{1}:{2}{3}".format(self.protocol, self.ipaddress, self.port, uri)

    def _build_url(self, path, query_param=None):
        """Build the complete URL, appending an urlencoded query string if given."""
        url = path
        if path:
            url = self._get_url(path)
        if query_param:
            url += "?{0}".format(urlencode(query_param))
        return url

    def _url_common_args_spec(self, method, api_timeout, headers=None):
        """Create the keyword arguments common to every ``open_url`` call.

        NOTE: extra *headers* are merged into the shared ``self._headers``
        dict, so they persist for the lifetime of this object (this matches
        historical behaviour and is how the session token is retained).
        """
        req_header = self._headers
        if headers:
            req_header.update(headers)
        if api_timeout is None:
            api_timeout = self.timeout
        if self.ca_path is None:
            self.ca_path = self._get_omam_ca_env()
        url_kwargs = {
            "method": method,
            "validate_certs": self.validate_certs,
            "ca_path": self.ca_path,
            "use_proxy": self.use_proxy,
            "headers": req_header,
            "timeout": api_timeout,
            "follow_redirects": 'all',
        }
        return url_kwargs

    def _args_without_session(self, path, method, api_timeout, headers=None):
        """Create the argument spec for basic authentication.

        Credentials are omitted for the session-creation POST itself, where
        they travel in the request payload instead.
        """
        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
        if not (path == SESSION_RESOURCE_COLLECTION["SESSION"] and method == 'POST'):
            url_kwargs["url_username"] = self.username
            url_kwargs["url_password"] = self.password
            url_kwargs["force_basic_auth"] = True
        return url_kwargs

    def _args_with_session(self, method, api_timeout, headers=None):
        """Create the argument spec when an X-Auth-Token session is active."""
        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
        url_kwargs["force_basic_auth"] = False
        return url_kwargs

    def invoke_request(self, uri, method, data=None, query_param=None, headers=None, api_timeout=None, dump=True):
        """Send one HTTP request and return an :class:`OpenURLResponse`.

        :param uri: request path (without query string).
        :param method: HTTP verb.
        :param data: optional payload; JSON-serialized when *dump* is True.
        :param query_param: optional dict appended as the query string.
        :param headers: optional extra headers (merged into the defaults).
        :param api_timeout: per-request timeout; falls back to ``self.timeout``.
        :raises HTTPError, URLError, SSLValidationError, ConnectionError:
            propagated unchanged from ``open_url``.
        """
        if 'X-Auth-Token' in self._headers:
            url_kwargs = self._args_with_session(method, api_timeout, headers=headers)
        else:
            url_kwargs = self._args_without_session(uri, method, api_timeout, headers=headers)
        if data and dump:
            data = json.dumps(data)
        url = self._build_url(uri, query_param=query_param)
        resp = open_url(url, data=data, **url_kwargs)
        return OpenURLResponse(resp)

    def __enter__(self):
        """Create a Redfish session (when requested) and store its token."""
        if self.req_session:
            payload = {'UserName': self.username,
                       'Password': self.password}
            path = SESSION_RESOURCE_COLLECTION["SESSION"]
            resp = self.invoke_request(path, 'POST', data=payload)
            if resp and resp.success:
                self.session_id = resp.json_data.get("Id")
                self._headers["X-Auth-Token"] = resp.headers.get('X-Auth-Token')
            else:
                msg = "Could not create the session"
                raise ConnectionError(msg)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Delete the session created in ``__enter__``; never suppress exceptions."""
        if self.session_id:
            path = SESSION_RESOURCE_COLLECTION["SESSION_ID"].format(Id=self.session_id)
            self.invoke_request(path, 'DELETE')
        return False

    @property
    def get_server_generation(self):
        """
        This method fetches the connected server generation.
        :return: tuple (generation, firmware version), e.g. ``(14, "4.11.11.11")``;
            either element is None when it cannot be determined.
        """
        # BUGFIX: the original initialized ``model`` but returned ``generation``,
        # raising NameError on a non-200 response or a model string without
        # an "<n>G" marker.  Initialize and guard both values instead.
        generation, firmware_version = None, None
        response = self.invoke_request(MANAGER_URI, 'GET')
        if response.status_code == 200:
            match = re.search(r"\d+(?=G)", response.json_data["Model"])
            if match:
                generation = int(match.group())
            firmware_version = response.json_data["FirmwareVersion"]
        return generation, firmware_version

    def wait_for_job_complete(self, task_uri, job_wait=False):
        """
        This function waits till the job completion.
        :param task_uri: uri to track job.
        :param job_wait: True or False decide whether to wait till the job completion.
        :return: object
        """
        response = None
        while job_wait:
            try:
                response = self.invoke_request(task_uri, "GET")
                if response.json_data.get("TaskState") == "Running":
                    time.sleep(10)
                else:
                    break
            except ValueError:
                # A completed task may answer with a non-JSON body (the SCP
                # content itself); hand back the raw body in that case.
                response = response.body
                break
        return response

    def wait_for_job_completion(self, job_uri, job_wait=False, reboot=False, apply_update=False):
        """
        This function waits till the job completion.
        :param job_uri: uri to track job.
        :param job_wait: True or False decide whether to wait till the job completion.
        :param reboot: whether a reboot was requested alongside the update.
        :param apply_update: whether an update is being staged.
        :return: object
        """
        time.sleep(5)
        response = self.invoke_request(job_uri, "GET")
        while job_wait:
            response = self.invoke_request(job_uri, "GET")
            if response.json_data.get("PercentComplete") == 100 and \
                    response.json_data.get("JobState") == "Completed":
                break
            # A staged update without reboot stays in "Starting" forever; stop polling.
            if response.json_data.get("JobState") == "Starting" and not reboot and apply_update:
                break
            time.sleep(30)
        return response

    @staticmethod
    def _share_payload_update(payload, share):
        """Copy optional network-share details from *share* into
        ``payload["ShareParameters"]``.

        ``None`` entries are skipped; an empty share name is also skipped
        (preserves historical behaviour).  Centralizes the mapping that was
        previously duplicated in every SCP method.
        """
        if share is None:
            share = {}
        share_params = payload["ShareParameters"]
        if share.get("share_ip") is not None:
            share_params["IPAddress"] = share["share_ip"]
        if share.get("share_name"):
            share_params["ShareName"] = share["share_name"]
        if share.get("share_type") is not None:
            share_params["ShareType"] = share["share_type"]
        if share.get("file_name") is not None:
            share_params["FileName"] = share["file_name"]
        if share.get("username") is not None:
            share_params["Username"] = share["username"]
        if share.get("password") is not None:
            share_params["Password"] = share["password"]

    def export_scp(self, export_format=None, export_use=None, target=None,
                   job_wait=False, share=None):
        """
        This method exports system configuration details from the system.
        :param export_format: XML or JSON.
        :param export_use: Default or Clone or Replace.
        :param target: IDRAC or NIC or ALL or BIOS or RAID.
        :param job_wait: True or False decide whether to wait till the job completion.
        :param share: dictionary with the network share details.
        :return: exported data in requested format.
        """
        payload = {"ExportFormat": export_format, "ExportUse": export_use,
                   "ShareParameters": {"Target": target}}
        self._share_payload_update(payload, share)
        response = self.invoke_request(EXPORT_URI, "POST", data=payload)
        if response.status_code == 202 and job_wait:
            task_uri = response.headers["Location"]
            response = self.wait_for_job_complete(task_uri, job_wait=job_wait)
        return response

    def import_scp_share(self, shutdown_type=None, host_powerstate=None, job_wait=True,
                         target=None, import_buffer=None, share=None):
        """
        This method imports system configuration using a network share.
        :param shutdown_type: Graceful / Forced / NoReboot.
        :param host_powerstate: On / Off.
        :param job_wait: True or False decide whether to wait till the job completion.
        :param target: IDRAC or NIC or ALL or BIOS or RAID.
        :param import_buffer: inline SCP content (XML/JSON) instead of a share file.
        :param share: dictionary which has all the share details.
        :return: json response
        """
        payload = {"ShutdownType": shutdown_type, "EndHostPowerState": host_powerstate,
                   "ShareParameters": {"Target": target}}
        if import_buffer is not None:
            payload["ImportBuffer"] = import_buffer
        self._share_payload_update(payload, share)
        response = self.invoke_request(IMPORT_URI, "POST", data=payload)
        if response.status_code == 202 and job_wait:
            task_uri = response.headers["Location"]
            response = self.wait_for_job_complete(task_uri, job_wait=job_wait)
        return response

    def import_preview(self, import_buffer=None, target=None, share=None, job_wait=False):
        """Preview an SCP import without applying it.

        Parameters mirror :meth:`import_scp_share`; the request goes to the
        ImportSystemConfigurationPreview action instead.
        """
        payload = {"ShareParameters": {"Target": target}}
        if import_buffer is not None:
            payload["ImportBuffer"] = import_buffer
        self._share_payload_update(payload, share)
        response = self.invoke_request(IMPORT_PREVIEW, "POST", data=payload)
        if response.status_code == 202 and job_wait:
            task_uri = response.headers["Location"]
            response = self.wait_for_job_complete(task_uri, job_wait=job_wait)
        return response

    def import_scp(self, import_buffer=None, target=None, job_wait=False):
        """
        This method imports system configuration details to the system.
        :param import_buffer: import buffer payload content xml or json format
        :param target: IDRAC or NIC or ALL or BIOS or RAID.
        :param job_wait: True or False decide whether to wait till the job completion.
        :return: json response
        """
        payload = {"ImportBuffer": import_buffer, "ShareParameters": {"Target": target}}
        response = self.invoke_request(IMPORT_URI, "POST", data=payload)
        if response.status_code == 202 and job_wait:
            task_uri = response.headers["Location"]
            response = self.wait_for_job_complete(task_uri, job_wait=job_wait)
        return response

    def get_idrac_local_account_attr(self, idrac_attribues, fqdd=None):
        """
        Filter the local user attributes out of the given iDRAC attributes.
        :param idrac_attribues: all the idrac attributes in json data format.
        :param fqdd: FQDD of the component holding the user attributes.
        :return: dict of "Users.*" attributes (empty when *fqdd* is not
            found), or None when no "SystemConfiguration" key is present.
        """
        user_attr = None
        if "SystemConfiguration" in idrac_attribues:
            sys_config = idrac_attribues.get("SystemConfiguration")
            # BUGFIX: the original left ``attributes`` unbound (NameError)
            # when no component matched *fqdd*; default to an empty list.
            attributes = []
            for comp in sys_config.get("Components"):
                if comp.get("FQDD") == fqdd:
                    attributes = comp.get("Attributes")
                    break
            user_attr = dict([(attr["Name"], attr["Value"]) for attr in attributes if attr["Name"].startswith("Users.")])
        return user_attr

    def _get_omam_ca_env(self):
        """Check if the value is set in REQUESTS_CA_BUNDLE or CURL_CA_BUNDLE or OMAM_CA_BUNDLE or returns None"""
        return os.environ.get("REQUESTS_CA_BUNDLE") or os.environ.get("CURL_CA_BUNDLE") or os.environ.get("OMAM_CA_BUNDLE")
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py
new file mode 100644
index 000000000..cdb5ddf2c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py
@@ -0,0 +1,399 @@
+# -*- coding: utf-8 -*-
+
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+import os
+import time
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
# Common Ansible argument spec for OpenManage Enterprise (OME)
# connection/authentication options; modules merge this into their own
# argument_spec.
ome_auth_params = {
    "hostname": {"required": True, "type": "str"},
    "username": {"required": True, "type": "str"},
    "password": {"required": True, "type": "str", "no_log": True},
    "port": {"type": "int", "default": 443},
    "validate_certs": {"type": "bool", "default": True},
    "ca_path": {"type": "path"},
    "timeout": {"type": "int", "default": 30},
}

# OME session endpoints (relative to the /api base): SESSION is POSTed to
# for login, SESSION_ID (formatted with the session Id) is DELETEd on logout.
SESSION_RESOURCE_COLLECTION = {
    "SESSION": "SessionService/Sessions",
    "SESSION_ID": "SessionService/Sessions('{Id}')",
}

# Job service endpoints: JOB_URI addresses one job by id, JOB_SERVICE_URI is
# the collection used for submission and queries.
JOB_URI = "JobService/Jobs({job_id})"
JOB_SERVICE_URI = "JobService/Jobs"
+
+
class OpenURLResponse(object):
    """Thin wrapper around the HTTP response object returned by ``open_url``.

    The payload is read eagerly in the constructor, since the underlying
    handle may be closed before the caller inspects the response.
    """

    def __init__(self, resp):
        self.resp = resp
        self.body = resp.read() if resp else None

    @property
    def json_data(self):
        """Return the body parsed as JSON; raise ValueError on a malformed payload."""
        try:
            return json.loads(self.body)
        except ValueError:
            raise ValueError("Unable to parse json")

    @property
    def status_code(self):
        return self.resp.getcode()

    @property
    def success(self):
        # The OME API signals success with these specific status codes.
        accepted_codes = (200, 201, 202, 204)
        return self.status_code in accepted_codes

    @property
    def token_header(self):
        """The X-Auth-Token header issued on session creation, if any."""
        return self.resp.headers.get('X-Auth-Token')
+
+
class RestOME(object):
    """Handles OME API requests.

    Wraps ``open_url`` with basic/session authentication against an
    OpenManage Enterprise appliance and provides helpers for pagination,
    device lookup and job submission/tracking.
    """

    def __init__(self, module_params=None, req_session=False):
        # Connection details come from the module argument spec (ome_auth_params).
        self.module_params = module_params
        self.hostname = self.module_params["hostname"]
        self.username = self.module_params["username"]
        self.password = self.module_params["password"]
        self.port = self.module_params["port"]
        self.validate_certs = self.module_params.get("validate_certs", True)
        self.ca_path = self.module_params.get("ca_path")
        self.timeout = self.module_params.get("timeout", 30)
        # When True, __enter__ creates an X-Auth-Token session instead of
        # sending basic auth with every request.
        self.req_session = req_session
        self.session_id = None
        self.protocol = 'https'
        self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}

    def _get_base_url(self):
        """builds base url"""
        return '{0}://{1}:{2}/api'.format(self.protocol, self.hostname, self.port)

    def _build_url(self, path, query_param=None):
        """builds complete url"""
        url = path
        base_uri = self._get_base_url()
        if path:
            url = '{0}/{1}'.format(base_uri, path)
        if query_param:
            """Ome filtering does not work as expected when '+' is passed,
            urlencode will encode spaces as '+' so replace it to '%20'"""
            url += "?{0}".format(urlencode(query_param).replace('+', '%20'))
        return url

    def _url_common_args_spec(self, method, api_timeout, headers=None):
        """Creates an argument common spec.

        NOTE: extra *headers* are merged into the shared ``self._headers``
        dict, so they persist across subsequent requests on this object.
        """
        req_header = self._headers
        if headers:
            req_header.update(headers)
        if api_timeout is None:
            api_timeout = self.timeout
        if self.ca_path is None:
            # Fall back to CA bundle environment variables when no explicit path given.
            self.ca_path = self._get_omam_ca_env()
        url_kwargs = {
            "method": method,
            "validate_certs": self.validate_certs,
            "ca_path": self.ca_path,
            "use_proxy": True,
            "headers": req_header,
            "timeout": api_timeout,
            "follow_redirects": 'all',
        }
        return url_kwargs

    def _args_without_session(self, method, api_timeout, headers=None):
        """Creates an argument spec in case of basic authentication"""
        req_header = self._headers
        if headers:
            req_header.update(headers)
        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
        url_kwargs["url_username"] = self.username
        url_kwargs["url_password"] = self.password
        url_kwargs["force_basic_auth"] = True
        return url_kwargs

    def _args_with_session(self, method, api_timeout, headers=None):
        """Creates an argument spec, in case of authentication with session"""
        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
        url_kwargs["force_basic_auth"] = False
        return url_kwargs

    def invoke_request(self, method, path, data=None, query_param=None, headers=None,
                       api_timeout=None, dump=True):
        """
        Sends a request through open_url
        Returns :class:`OpenURLResponse` object.
        :arg method: HTTP verb to use for the request
        :arg path: path to request without query parameter
        :arg data: (optional) Payload to send with the request
        :arg query_param: (optional) Dictionary of query parameter to send with request
        :arg headers: (optional) Dictionary of HTTP Headers to send with the
            request
        :arg api_timeout: (optional) How long to wait for the server to send
            data before giving up
        :arg dump: (Optional) boolean value for dumping payload data.
        :returns: OpenURLResponse
        """
        try:
            # An X-Auth-Token header means a session is active; skip basic auth.
            if 'X-Auth-Token' in self._headers:
                url_kwargs = self._args_with_session(method, api_timeout, headers=headers)
            else:
                url_kwargs = self._args_without_session(method, api_timeout, headers=headers)
            if data and dump:
                data = json.dumps(data)
            url = self._build_url(path, query_param=query_param)
            resp = open_url(url, data=data, **url_kwargs)
            resp_data = OpenURLResponse(resp)
        except (HTTPError, URLError, SSLValidationError, ConnectionError) as err:
            raise err
        return resp_data

    def __enter__(self):
        """Creates sessions by passing it to header"""
        if self.req_session:
            payload = {'UserName': self.username,
                       'Password': self.password,
                       'SessionType': 'API', }
            path = SESSION_RESOURCE_COLLECTION["SESSION"]
            resp = self.invoke_request('POST', path, data=payload)
            if resp and resp.success:
                # Remember the session id for logout and the token for future requests.
                self.session_id = resp.json_data.get("Id")
                self._headers["X-Auth-Token"] = resp.token_header
            else:
                msg = "Could not create the session"
                raise ConnectionError(msg)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Deletes a session id, which is in use for request"""
        if self.session_id:
            path = SESSION_RESOURCE_COLLECTION["SESSION_ID"].format(Id=self.session_id)
            self.invoke_request('DELETE', path)
        # Returning False propagates any in-flight exception.
        return False

    def get_all_report_details(self, uri):
        """
        This implementation mainly dependent on '@odata.count' value.
        Currently first request without query string, always returns total number of available
        reports in '@odata.count'.
        """
        try:
            resp = self.invoke_request('GET', uri)
            data = resp.json_data
            report_list = data["value"]
            total_count = data['@odata.count']
            remaining_count = total_count - len(report_list)
            first_page_count = len(report_list)
            # Page through the remainder using $top/$skip until everything is fetched.
            while remaining_count > 0:
                resp = self.invoke_request('GET', uri,
                                           query_param={"$top": first_page_count, "$skip": len(report_list)})
                data = resp.json_data
                value = data["value"]
                report_list.extend(value)
                remaining_count = remaining_count - len(value)
            return {"resp_obj": resp, "report_list": report_list}
        except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
            raise err

    def get_job_type_id(self, jobtype_name):
        """This provides an ID of the job type."""
        job_type_id = None
        resp = self.invoke_request('GET', "JobService/JobTypes")
        data = resp.json_data["value"]
        for each in data:
            if each["Name"] == jobtype_name:
                job_type_id = each["Id"]
                break
        return job_type_id

    def get_device_id_from_service_tag(self, service_tag):
        """
        :param service_tag: service tag of the device
        :return: dict
            Id: int: device id (None when the service tag is not found)
            value: dict: device id details (empty dict when not found)
        """
        device_id = None
        query = "DeviceServiceTag eq '{0}'".format(service_tag)
        response = self.invoke_request("GET", "DeviceService/Devices", query_param={"$filter": query})
        value = response.json_data.get("value", [])
        device_info = {}
        if value:
            device_info = value[0]
            device_id = device_info["Id"]
        return {"Id": device_id, "value": device_info}

    def get_all_items_with_pagination(self, uri):
        """
        This implementation mainly to get all available items from ome for pagination
        supported GET uri
        :param uri: uri which supports pagination
        :return: dict.
        """
        try:
            resp = self.invoke_request('GET', uri)
            data = resp.json_data
            total_items = data.get("value", [])
            total_count = data.get('@odata.count', 0)
            next_link = data.get('@odata.nextLink', '')
            # Follow @odata.nextLink, stripping the '/api' prefix the builder re-adds.
            while next_link:
                resp = self.invoke_request('GET', next_link.split('/api')[-1])
                data = resp.json_data
                value = data["value"]
                next_link = data.get('@odata.nextLink', '')
                total_items.extend(value)
            return {"total_count": total_count, "value": total_items}
        except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
            raise err

    def get_device_type(self):
        """
        Returns device type map where as key is type and value is type name
        eg: {1000: "SERVER", 2000: "CHASSIS", 4000: "NETWORK_IOM", "8000": "STORAGE_IOM", 3000: "STORAGE"}
        :return: dict, first item dict gives device type map
        """
        device_map = {}
        response = self.invoke_request("GET", "DeviceService/DeviceType")
        if response.json_data.get("value"):
            device_map = dict([(item["DeviceType"], item["Name"]) for item in response.json_data["value"]])
        return device_map

    def get_job_info(self, job_id):
        """Fetch one job and classify its LastRunStatus.

        :param job_id: id of the job to inspect.
        :return: tuple ``(exit_poll, job_failed, message)``; *exit_poll* is
            True once polling should stop, *message* is None while pending.
        """
        try:
            job_status_map = {
                2020: "Scheduled", 2030: "Queued", 2040: "Starting", 2050: "Running", 2060: "Completed",
                2070: "Failed", 2090: "Warning", 2080: "New", 2100: "Aborted", 2101: "Paused", 2102: "Stopped",
                2103: "Canceled"
            }
            failed_job_status = [2070, 2090, 2100, 2101, 2102, 2103]
            job_url = JOB_URI.format(job_id=job_id)
            job_resp = self.invoke_request('GET', job_url)
            job_dict = job_resp.json_data
            job_status = job_dict['LastRunStatus']['Id']
            # Completed (2060) or Scheduled (2020) terminate polling successfully.
            if job_status in [2060, 2020]:
                job_failed = False
                message = "Job {0} successfully.".format(job_status_map[job_status])
                exit_poll = True
                return exit_poll, job_failed, message
            elif job_status in failed_job_status:
                exit_poll = True
                job_failed = True
                message = "Job is in {0} state, and is not completed.".format(job_status_map[job_status])
                return exit_poll, job_failed, message
            return False, False, None
        except HTTPError:
            job_failed = True
            message = "Unable to track the job status of {0}.".format(job_id)
            exit_poll = True
            return exit_poll, job_failed, message

    def job_tracking(self, job_id, job_wait_sec=600, sleep_time=60):
        """
        job_id: job id
        job_wait_sec: Maximum time to wait to fetch the final job details in seconds
        sleep_time: Maximum time to sleep in seconds in each job details fetch
        :return: tuple (job_failed, message)
        """
        max_sleep_time = job_wait_sec
        sleep_interval = sleep_time
        # Poll until the job reaches a terminal state or the wait budget is spent.
        while max_sleep_time:
            if max_sleep_time > sleep_interval:
                max_sleep_time = max_sleep_time - sleep_interval
            else:
                # Last partial interval: consume whatever budget remains.
                sleep_interval = max_sleep_time
                max_sleep_time = 0
            time.sleep(sleep_interval)
            exit_poll, job_failed, job_message = self.get_job_info(job_id)
            if exit_poll is True:
                return job_failed, job_message
        return True, "The job is not complete after {0} seconds.".format(job_wait_sec)

    def strip_substr_dict(self, odata_dict, chkstr='@odata.'):
        """Remove keys containing *chkstr* (e.g. OData annotations) from
        *odata_dict* in place and return it."""
        # Iterate over a copy's keys so the original dict can be mutated safely.
        cp = odata_dict.copy()
        klist = cp.keys()
        for k in klist:
            if chkstr in str(k).lower():
                odata_dict.pop(k)
        return odata_dict

    def job_submission(self, job_name, job_desc, targets, params, job_type,
                       schedule="startnow", state="Enabled"):
        """Submit a job to the OME job service and return the raw response."""
        job_payload = {"JobName": job_name, "JobDescription": job_desc,
                       "Schedule": schedule, "State": state, "Targets": targets,
                       "Params": params, "JobType": job_type}
        response = self.invoke_request("POST", JOB_SERVICE_URI, data=job_payload)
        return response

    def test_network_connection(self, share_address, share_path, share_type,
                                share_user=None, share_password=None, share_domain=None):
        """Submit a ValidateNWFileShare_Task job that checks share reachability
        and write access; returns the job-submission response."""
        job_type = {"Id": 56, "Name": "ValidateNWFileShare_Task"}
        params = [
            {"Key": "checkPathOnly", "Value": "false"},
            {"Key": "shareType", "Value": share_type},
            {"Key": "ShareNetworkFilePath", "Value": share_path},
            {"Key": "shareAddress", "Value": share_address},
            {"Key": "testShareWriteAccess", "Value": "true"}
        ]
        if share_user is not None:
            params.append({"Key": "UserName", "Value": share_user})
        if share_password is not None:
            params.append({"Key": "Password", "Value": share_password})
        if share_domain is not None:
            params.append({"Key": "domainName", "Value": share_domain})
        job_response = self.job_submission("Validate Share", "Validate Share", [], params, job_type)
        return job_response

    def check_existing_job_state(self, job_type_name):
        """Return (job_allowed, jobs): whether no queued/starting/running job
        of *job_type_name* exists, plus the conflicting job or the full list."""
        # Filter to jobs in Queued (2030), Starting (2040) or Running (2050) state.
        query_param = {"$filter": "LastRunStatus/Id eq 2030 or LastRunStatus/Id eq 2040 or LastRunStatus/Id eq 2050"}
        job_resp = self.invoke_request("GET", JOB_SERVICE_URI, query_param=query_param)
        job_lst = job_resp.json_data["value"] if job_resp.json_data.get("value") is not None else []
        for job in job_lst:
            if job["JobType"]["Name"] == job_type_name:
                job_allowed = False
                available_jobs = job
                break
        else:
            # for-else: runs only when no matching active job was found.
            job_allowed = True
            available_jobs = job_lst
        return job_allowed, available_jobs

    def _get_omam_ca_env(self):
        """Check if the value is set in REQUESTS_CA_BUNDLE or CURL_CA_BUNDLE or OMAM_CA_BUNDLE or returns None"""
        return os.environ.get("REQUESTS_CA_BUNDLE") or os.environ.get("CURL_CA_BUNDLE") or os.environ.get("OMAM_CA_BUNDLE")
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/redfish.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/redfish.py
new file mode 100644
index 000000000..59c467057
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/redfish.py
@@ -0,0 +1,219 @@
+# -*- coding: utf-8 -*-
+
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
# Common authentication/connection argument spec shared by the Redfish modules.
redfish_auth_params = {
    "baseuri": {"required": True, "type": "str"},
    "username": {"required": True, "type": "str"},
    "password": {"required": True, "type": "str", "no_log": True},
    # Certificate validation is on by default; ca_path may point to a PEM bundle.
    "validate_certs": {"type": "bool", "default": True},
    "ca_path": {"type": "path"},
    "timeout": {"type": "int", "default": 30},
}

# Redfish session-service URIs used to create and delete login sessions.
SESSION_RESOURCE_COLLECTION = {
    "SESSION": "/redfish/v1/Sessions",
    "SESSION_ID": "/redfish/v1/Sessions/{Id}",
}
+
+
class OpenURLResponse(object):
    """Wrapper over the HTTPResponse object returned by open_url.

    The payload is read eagerly at construction time because the underlying
    response stream can only be consumed once.
    """

    def __init__(self, resp):
        self.body = None
        self.resp = resp
        if self.resp:
            self.body = self.resp.read()

    @property
    def json_data(self):
        """Payload parsed as JSON; raises ValueError when it is not valid JSON."""
        try:
            return json.loads(self.body)
        except ValueError:
            raise ValueError("Unable to parse json")

    @property
    def status_code(self):
        """HTTP status code of the response."""
        return self.resp.getcode()

    @property
    def success(self):
        """True when the HTTP status is in the 2xx range.

        Bug fix: the original expression ``status >= 200 & status <= 299`` was
        evaluated as ``status >= (200 & status) <= 299`` because bitwise ``&``
        binds tighter than comparisons, wrongly reporting success for many
        error codes (for example 404).
        """
        return 200 <= self.status_code <= 299

    @property
    def headers(self):
        """Response headers."""
        return self.resp.headers

    @property
    def reason(self):
        """Reason phrase returned by the server."""
        return self.resp.reason
+
+
class Redfish(object):
    """Handles iDRAC Redfish API requests over HTTPS.

    Supports either basic authentication per request or a Redfish session that
    is created on context-manager entry (req_session=True) and deleted on exit.
    """

    def __init__(self, module_params=None, req_session=False):
        # Connection settings come from the Ansible module parameters.
        self.module_params = module_params
        self.hostname = self.module_params["baseuri"]
        self.username = self.module_params["username"]
        self.password = self.module_params["password"]
        self.validate_certs = self.module_params.get("validate_certs", True)
        self.ca_path = self.module_params.get("ca_path")
        self.timeout = self.module_params.get("timeout", 30)
        self.use_proxy = self.module_params.get("use_proxy", True)
        self.req_session = req_session
        self.session_id = None
        self.protocol = 'https'
        self.root_uri = '/redfish/v1/'
        # Default headers; __enter__ adds X-Auth-Token here when a session is used.
        self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}

    def _get_base_url(self):
        """Build the scheme://host prefix for all requests."""
        return '{0}://{1}'.format(self.protocol, self.hostname)

    def _build_url(self, path, query_param=None):
        """Build the complete URL from path and optional query parameters."""
        url = path
        base_uri = self._get_base_url()
        if path:
            url = base_uri + path
        if query_param:
            url += "?{0}".format(urlencode(query_param))
        return url

    def _url_common_args_spec(self, method, api_timeout, headers=None):
        """Create the keyword arguments common to all open_url calls."""
        # NOTE(review): req_header aliases self._headers, so any extra headers
        # passed in persist on the instance for later requests -- confirm intended.
        req_header = self._headers
        if headers:
            req_header.update(headers)
        if api_timeout is None:
            api_timeout = self.timeout
        # Fall back to the CA bundle environment variables when ca_path is unset.
        if self.ca_path is None:
            self.ca_path = self._get_omam_ca_env()
        url_kwargs = {
            "method": method,
            "validate_certs": self.validate_certs,
            "ca_path": self.ca_path,
            "use_proxy": self.use_proxy,
            "headers": req_header,
            "timeout": api_timeout,
            "follow_redirects": 'all',
        }
        return url_kwargs

    def _args_without_session(self, path, method, api_timeout, headers=None):
        """Create the open_url argument spec for basic authentication."""
        req_header = self._headers
        if headers:
            req_header.update(headers)
        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
        # The session-creation POST itself must not carry basic-auth credentials;
        # they are sent in its JSON payload instead (see __enter__).
        if not (path == SESSION_RESOURCE_COLLECTION["SESSION"] and method == 'POST'):
            url_kwargs["url_username"] = self.username
            url_kwargs["url_password"] = self.password
            url_kwargs["force_basic_auth"] = True
        return url_kwargs

    def _args_with_session(self, method, api_timeout, headers=None):
        """Create the open_url argument spec for session (token) authentication."""
        url_kwargs = self._url_common_args_spec(method, api_timeout, headers=headers)
        url_kwargs["force_basic_auth"] = False
        return url_kwargs

    def invoke_request(self, method, path, data=None, query_param=None, headers=None,
                       api_timeout=None, dump=True):
        """
        Sends a request through open_url
        Returns :class:`OpenURLResponse` object.
        :arg method: HTTP verb to use for the request
        :arg path: path to request without query parameter
        :arg data: (optional) Payload to send with the request
        :arg query_param: (optional) Dictionary of query parameter to send with request
        :arg headers: (optional) Dictionary of HTTP Headers to send with the
            request
        :arg api_timeout: (optional) How long to wait for the server to send
            data before giving up
        :arg dump: (Optional) boolean value for dumping payload data.
        :returns: OpenURLResponse
        """
        try:
            # Presence of X-Auth-Token indicates an active session (set in __enter__).
            if 'X-Auth-Token' in self._headers:
                url_kwargs = self._args_with_session(method, api_timeout, headers=headers)
            else:
                url_kwargs = self._args_without_session(path, method, api_timeout, headers=headers)
            if data and dump:
                data = json.dumps(data)
            url = self._build_url(path, query_param=query_param)
            resp = open_url(url, data=data, **url_kwargs)
            resp_data = OpenURLResponse(resp)
        except (HTTPError, URLError, SSLValidationError, ConnectionError) as err:
            raise err
        return resp_data

    def __enter__(self):
        """Create a Redfish session (when requested) and store its auth token."""
        if self.req_session:
            payload = {'UserName': self.username,
                       'Password': self.password}
            path = SESSION_RESOURCE_COLLECTION["SESSION"]
            resp = self.invoke_request('POST', path, data=payload)
            if resp and resp.success:
                # Session id is kept so __exit__ can delete the session.
                self.session_id = resp.json_data.get("Id")
                self._headers["X-Auth-Token"] = resp.headers.get('X-Auth-Token')
            else:
                msg = "Could not create the session"
                raise ConnectionError(msg)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Delete the session created in __enter__; exceptions are not suppressed."""
        if self.session_id:
            path = SESSION_RESOURCE_COLLECTION["SESSION_ID"].format(Id=self.session_id)
            self.invoke_request('DELETE', path)
        return False

    def strip_substr_dict(self, odata_dict, chkstr='@odata.'):
        """Remove keys containing chkstr (case-insensitively) from odata_dict in place."""
        cp = odata_dict.copy()
        klist = cp.keys()
        for k in klist:
            if chkstr in str(k).lower():
                odata_dict.pop(k)
        return odata_dict

    def _get_omam_ca_env(self):
        """Check if the value is set in REQUESTS_CA_BUNDLE or CURL_CA_BUNDLE or OMAM_CA_BUNDLE or returns None"""
        return os.environ.get("REQUESTS_CA_BUNDLE") or os.environ.get("CURL_CA_BUNDLE") or os.environ.get("OMAM_CA_BUNDLE")
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py
new file mode 100644
index 000000000..d0da26e57
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py
@@ -0,0 +1,350 @@
+# -*- coding: utf-8 -*-
+
+# Dell OpenManage Ansible Modules
+# Version 6.1.0
+# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
# User-facing status messages shared by the iDRAC/OME modules.
CHANGES_MSG = "Changes found to be applied."
NO_CHANGES_MSG = "No changes found to be applied."
RESET_UNTRACK = "iDRAC reset is in progress. Until the iDRAC is reset, the changes would not apply."
RESET_SUCCESS = "iDRAC has been reset successfully."
RESET_FAIL = "Unable to reset the iDRAC. For changes to reflect, manually reset the iDRAC."
# Default iDRAC resource identifiers and Redfish URI templates.
SYSTEM_ID = "System.Embedded.1"
MANAGER_ID = "iDRAC.Embedded.1"
SYSTEMS_URI = "/redfish/v1/Systems"
MANAGERS_URI = "/redfish/v1/Managers"
IDRAC_RESET_URI = "/redfish/v1/Managers/{res_id}/Actions/Manager.Reset"
SYSTEM_RESET_URI = "/redfish/v1/Systems/{res_id}/Actions/ComputerSystem.Reset"
MANAGER_JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs?$expand=*($levels=1)"
MANAGER_JOB_ID_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{0}"


# NOTE(review): imports placed after the constants; PEP 8 would put them first.
import time
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+
+
def strip_substr_dict(odata_dict, chkstr='@odata.', case_sensitive=False):
    """Remove every key of *odata_dict* containing *chkstr* and return the dict.

    The dictionary is modified in place and the same object is returned.

    :param odata_dict: the dict to be stripped of unwanted keys.
    :param chkstr: the substring searched for in each key.
    :param case_sensitive: when False (default) the match ignores case.
    :return: the stripped dict.
    """
    needle = chkstr if case_sensitive else chkstr.lower()
    for key in list(odata_dict):
        haystack = key if case_sensitive else str(key).lower()
        if needle in haystack:
            odata_dict.pop(key, None)
    return odata_dict
+
+
def job_tracking(rest_obj, job_uri, max_job_wait_sec=600, job_state_var=('LastRunStatus', 'Id'),
                 job_complete_states=(2060, 2020, 2090), job_fail_states=(2070, 2101, 2102, 2103),
                 job_running_states=(2050, 2040, 2030, 2100),
                 sleep_interval_secs=10, max_unresponsive_wait=30, initial_wait=1):
    '''
    Poll an OME job until it reaches a complete or failed state, or times out.

    :param rest_obj: the rest_obj either of the below
        ansible_collections.dellemc.openmanage.plugins.module_utils.ome.RestOME
    :param job_uri: the uri to fetch the job response dict
    :param max_job_wait_sec: max time the job will wait
    :param job_state_var: The nested dict traversal path to the status value
    :param job_complete_states: status ids treated as successful completion
    :param job_fail_states: status ids treated as failure
    :param job_running_states: status ids treated as still in progress
    :param sleep_interval_secs: seconds between polls
    :param max_unresponsive_wait: total seconds tolerated for request failures
    :param initial_wait: seconds waited before the first poll
    :return: tuple (job_failed, msg, job_dict, wait_time)
    '''
    # ome_job_status_map = {
    #     2020: "Scheduled", 2030: "Queued", 2040: "Starting", 2050: "Running", 2060: "completed successfully",
    #     2070: "Failed", 2090: "completed with errors", 2080: "New", 2100: "Aborted", 2101: "Paused", 2102: "Stopped",
    #     2103: "Canceled"
    # }
    # ensure job states are mutually exclusive
    max_retries = max_job_wait_sec // sleep_interval_secs
    unresp = max_unresponsive_wait // sleep_interval_secs
    loop_ctr = 0
    job_failed = True
    job_dict = {}
    wait_time = 0
    if set(job_complete_states) & set(job_fail_states):
        return job_failed, "Overlapping job states found.", job_dict, wait_time
    msg = "Job tracking started."
    time.sleep(initial_wait)
    while loop_ctr < max_retries:
        loop_ctr += 1
        try:
            job_resp = rest_obj.invoke_request('GET', job_uri)
            job_dict = job_resp.json_data
            # Walk the nested path (e.g. LastRunStatus -> Id) to the status value.
            job_status = job_dict
            for x in job_state_var:
                job_status = job_status.get(x, {})
            if job_status in job_complete_states:
                job_failed = False
                msg = "Job tracking completed."
                # Setting loop_ctr to max_retries ends the loop on the next check.
                loop_ctr = max_retries
            elif job_status in job_fail_states:
                job_failed = True
                msg = "Job is in Failed state."
                loop_ctr = max_retries
            if job_running_states:
                if job_status in job_running_states:
                    time.sleep(sleep_interval_secs)
                    wait_time = wait_time + sleep_interval_secs
            else:
                # No running states supplied: sleep between every poll.
                time.sleep(sleep_interval_secs)
                wait_time = wait_time + sleep_interval_secs
        except Exception as err:
            # Tolerate transient request failures until the unresponsive budget runs out.
            if unresp:
                time.sleep(sleep_interval_secs)
                wait_time = wait_time + sleep_interval_secs
            else:
                job_failed = True
                msg = "Exception in job tracking " + str(err)
                break
            unresp = unresp - 1
    return job_failed, msg, job_dict, wait_time
+
+
def idrac_redfish_job_tracking(
        rest_obj, job_uri, max_job_wait_sec=600, job_state_var='JobState',
        job_complete_states=("Completed", "Downloaded", "CompletedWithErrors", "RebootCompleted"),
        job_fail_states=("Failed", "RebootFailed", "Unknown"),
        job_running_states=("Running", "RebootPending", "Scheduling", "Scheduled", "Downloading", "Waiting", "Paused",
                            "New", "PendingActivation", "ReadyForExecution"),
        sleep_interval_secs=10, max_unresponsive_wait=30, initial_wait=1):
    """Poll an iDRAC Redfish job until it completes, fails, or times out.

    Mirrors job_tracking() but for iDRAC Redfish jobs, whose state is a string
    in a flat key (JobState). Note the argument order of invoke_request here is
    (uri, method), matching the iDRAC Redfish client rather than the OME one.

    :return: tuple (job_failed, msg, job_dict, wait_time)
    """
    # idrac_redfish_job_sates = [ "New", "Scheduled", "Running", "Completed", "Downloading", "Downloaded",
    # "Scheduling", "ReadyForExecution", "Waiting", "Paused", "Failed", "CompletedWithErrors", "RebootPending",
    # "RebootFailed", "RebootCompleted", "PendingActivation", "Unknown"]
    max_retries = max_job_wait_sec // sleep_interval_secs
    unresp = max_unresponsive_wait // sleep_interval_secs
    loop_ctr = 0
    job_failed = True
    job_dict = {}
    wait_time = 0
    if set(job_complete_states) & set(job_fail_states):
        return job_failed, "Overlapping job states found.", job_dict, wait_time
    msg = "Job tracking started."
    time.sleep(initial_wait)
    while loop_ctr < max_retries:
        loop_ctr += 1
        try:
            job_resp = rest_obj.invoke_request(job_uri, 'GET')
            job_dict = job_resp.json_data
            job_status = job_dict
            job_status = job_status.get(job_state_var, "Unknown")
            if job_status in job_running_states:
                time.sleep(sleep_interval_secs)
                wait_time = wait_time + sleep_interval_secs
            elif job_status in job_complete_states:
                job_failed = False
                msg = "Job tracking completed."
                # Setting loop_ctr to max_retries ends the loop on the next check.
                loop_ctr = max_retries
            elif job_status in job_fail_states:
                job_failed = True
                msg = "Job is in {0} state.".format(job_status)
                loop_ctr = max_retries
            else:  # unrecognised states, just wait
                time.sleep(sleep_interval_secs)
                wait_time = wait_time + sleep_interval_secs
        except Exception as err:
            # Tolerate transient request failures until the unresponsive budget runs out.
            if unresp:
                time.sleep(sleep_interval_secs)
                wait_time = wait_time + sleep_interval_secs
            else:
                job_failed = True
                msg = "Exception in job tracking " + str(err)
                break
            unresp = unresp - 1
    return job_failed, msg, job_dict, wait_time
+
+
def get_rest_items(rest_obj, uri="DeviceService/Devices", key="Id", value="Identifier", selector="value"):
    """Collect a {key: value} lookup map from a paginated REST collection.

    :param rest_obj: REST client exposing get_all_items_with_pagination().
    :param uri: collection URI to read.
    :param key: item field used as the map key.
    :param value: item field used as the map value.
    :param selector: field of the response holding the item list.
    :return: dict mapping each item's key field to its value field (empty when
        the selector field is missing or empty).
    """
    response = rest_obj.get_all_items_with_pagination(uri)
    items = response.get(selector)
    if not items:
        return {}
    return {entry.get(key): entry.get(value) for entry in items}
+
+
def get_item_and_list(rest_obj, name, uri, key='Name', value='value'):
    """Fetch a collection and return the first item whose *key* equals *name*.

    :param rest_obj: REST client exposing invoke_request().
    :param name: value matched against each item's *key* field.
    :param uri: collection URI to read.
    :param key: item field compared with *name*.
    :param value: response field holding the item list.
    :return: tuple (matching item or {}, full item list).
    """
    response = rest_obj.invoke_request('GET', uri)
    items = []
    if response.success and response.json_data.get(value):
        items = response.json_data.get(value, [])
    match = next((entry for entry in items if entry.get(key, "") == name), {})
    return match, items
+
+
def apply_diff_key(src, dest, klist):
    """Copy differing, non-None values of the listed keys from src into dest.

    :param src: dict providing the new values.
    :param dest: dict updated in place.
    :param klist: iterable of keys to compare.
    :return: number of keys that were changed in dest.
    """
    changed = 0
    for key in klist:
        new_val = src.get(key)
        # Skip keys that src does not set, and values already equal in dest.
        if new_val is None or new_val == dest.get(key):
            continue
        dest[key] = new_val
        changed += 1
    return changed
+
+
def wait_for_job_completion(redfish_obj, uri, job_wait=True, wait_timeout=120, sleep_time=10):
    """Poll a Redfish job URI until PercentComplete reaches 100 or timeout.

    :param redfish_obj: Redfish client exposing invoke_request(method, uri).
    :param uri: job resource URI to poll.
    :param job_wait: when False, fetch the job once and return immediately.
    :param wait_timeout: maximum seconds to keep polling.
    :param sleep_time: seconds between polls.
    :return: tuple (job response, message) -- message is empty on success, or a
        timeout message (with an empty dict) when the job did not finish in time.
    """
    max_sleep_time = wait_timeout
    sleep_interval = sleep_time
    if job_wait:
        while max_sleep_time:
            if max_sleep_time > sleep_interval:
                max_sleep_time = max_sleep_time - sleep_interval
            else:
                # Final partial interval: sleep only the remaining time.
                sleep_interval = max_sleep_time
                max_sleep_time = 0
            time.sleep(sleep_interval)
            job_resp = redfish_obj.invoke_request("GET", uri)
            if job_resp.json_data.get("PercentComplete") == 100:
                # Short settle delay after completion before returning.
                time.sleep(10)
                return job_resp, ""
    else:
        job_resp = redfish_obj.invoke_request("GET", uri)
        time.sleep(10)
        return job_resp, ""
    return {}, "The job is not complete after {0} seconds.".format(wait_timeout)
+
+
def wait_after_idrac_reset(idrac, wait_time_sec, interval=30):
    """Wait for the iDRAC to come back online after a reset.

    Polls the Managers collection until a request succeeds or the allowed wait
    time elapses.

    :param idrac: iDRAC REST client exposing invoke_request(uri, method).
    :param wait_time_sec: total seconds to wait for the iDRAC to respond.
    :param interval: polling interval in seconds.
    :return: tuple (track_failed, msg) -- track_failed is False when the iDRAC
        responded within the allowed time.
    """
    # Give the iDRAC a head start before the first probe.
    time.sleep(interval // 2)
    msg = RESET_UNTRACK
    wait = wait_time_sec
    track_failed = True
    while wait > 0:
        try:
            idrac.invoke_request(MANAGERS_URI, 'GET')
            # Reachable again; allow a short settle period before reporting success.
            time.sleep(interval // 2)
            msg = RESET_SUCCESS
            track_failed = False
            break
        except Exception:
            time.sleep(interval)
            wait = wait - interval
    return track_failed, msg
+
+
# NOTE(review): candidate for relocation into the idrac_redfish module_utils.
def reset_idrac(idrac_restobj, wait_time_sec=300, res_id=MANAGER_ID, interval=30):
    """Trigger a graceful iDRAC restart and optionally wait until it is back online.

    :param idrac_restobj: iDRAC REST client exposing invoke_request(uri, method).
    :param wait_time_sec: seconds to wait for the iDRAC to come back; 0 skips waiting.
    :param res_id: Manager resource id used in the reset URI.
    :param interval: polling interval passed to wait_after_idrac_reset.
    :return: tuple (reset, track_failed, reset_msg) -- reset is True when the
        reset request was accepted; track_failed reports the post-reset wait.
    """
    track_failed = True
    reset_msg = "iDRAC reset triggered successfully."
    try:
        resp = idrac_restobj.invoke_request(IDRAC_RESET_URI.format(res_id=res_id), 'POST',
                                            data={"ResetType": "GracefulRestart"})
        if wait_time_sec:
            track_failed, reset_msg = wait_after_idrac_reset(idrac_restobj, wait_time_sec, interval)
        reset = True
    except Exception:
        reset = False
        reset_msg = RESET_FAIL
    return reset, track_failed, reset_msg
+
+
def get_manager_res_id(idrac):
    """Return the resource id of the first Manager reported by the iDRAC.

    Falls back to the default MANAGER_ID when the Managers collection request
    fails with an HTTP error.

    :param idrac: iDRAC REST client exposing invoke_request(uri, method).
    :return: manager resource id string.
    """
    try:
        response = idrac.invoke_request(MANAGERS_URI, "GET")
        first_member = response.json_data.get("Members")[0]
        return first_member.get('@odata.id').split("/")[-1]
    except HTTPError:
        return MANAGER_ID
+
+
def wait_for_idrac_job_completion(idrac, uri, job_wait=True, wait_timeout=120, sleep_time=10):
    """Poll an iDRAC job URI until it completes, a reboot fails, or it times out.

    Note: invoke_request is called as (uri, method), matching the iDRAC client.

    :param idrac: iDRAC REST client exposing invoke_request(uri, method).
    :param uri: job resource URI to poll.
    :param job_wait: when False, fetch the job once and return immediately.
    :param wait_timeout: maximum seconds to keep polling.
    :param sleep_time: seconds between polls.
    :return: tuple (job response, message) -- message is empty on success, or a
        not-complete message on RebootFailed/timeout.
    """
    max_sleep_time = wait_timeout
    sleep_interval = sleep_time
    job_msg = "The job is not complete after {0} seconds.".format(wait_timeout)
    if job_wait:
        while max_sleep_time:
            if max_sleep_time > sleep_interval:
                max_sleep_time = max_sleep_time - sleep_interval
            else:
                # Final partial interval: sleep only the remaining time.
                sleep_interval = max_sleep_time
                max_sleep_time = 0
            time.sleep(sleep_interval)
            job_resp = idrac.invoke_request(uri, "GET")
            if job_resp.json_data.get("PercentComplete") == 100:
                # Short settle delay after completion before returning.
                time.sleep(10)
                return job_resp, ""
            if job_resp.json_data.get("JobState") == "RebootFailed":
                time.sleep(10)
                return job_resp, job_msg
    else:
        job_resp = idrac.invoke_request(uri, "GET")
        time.sleep(10)
        return job_resp, ""
    return {}, "The job is not complete after {0} seconds.".format(wait_timeout)
+
+
def idrac_system_reset(idrac, res_id, payload=None, job_wait=True, wait_time_sec=300, interval=30):
    """Trigger a ComputerSystem reset and optionally track the resulting reboot job.

    :param idrac: iDRAC REST client exposing invoke_request(uri, method).
    :param res_id: System resource id used in the reset URI.
    :param payload: reset request body (for example {"ResetType": ...}).
    :param job_wait: whether to wait for the RebootPending job to finish.
    :param wait_time_sec: maximum seconds to wait for the reboot job.
    :param interval: unused here; kept for interface parity with reset_idrac.
    :return: tuple (reset, track_failed, reset_msg, job_resp).
    """
    # NOTE(review): track_failed is initialised True and never updated below;
    # reset also stays False when no RebootPending job is found -- confirm intended.
    track_failed, reset, job_resp = True, False, {}
    reset_msg = RESET_UNTRACK
    try:
        idrac.invoke_request(SYSTEM_RESET_URI.format(res_id=res_id), 'POST', data=payload)
        time.sleep(10)
        if wait_time_sec:
            resp = idrac.invoke_request(MANAGER_JOB_URI, "GET")
            # Find the reboot job created by the reset request.
            job = list(filter(lambda d: d["JobState"] in ["RebootPending"], resp.json_data["Members"]))
            if job:
                job_resp, msg = wait_for_idrac_job_completion(idrac, MANAGER_JOB_ID_URI.format(job[0]["Id"]),
                                                              job_wait=job_wait, wait_timeout=wait_time_sec)
                if "job is not complete" in msg:
                    reset, reset_msg = False, msg
                if not msg:
                    reset = True
    except Exception:
        reset = False
        reset_msg = RESET_FAIL
    return reset, track_failed, reset_msg, job_resp
+
+
def get_system_res_id(idrac):
    """Return the resource id of the first System reported by the iDRAC.

    :param idrac: iDRAC REST client exposing invoke_request(uri, method).
    :return: tuple (system resource id, error message) -- on HTTP error the
        default SYSTEM_ID is returned together with an explanatory message.
    """
    try:
        response = idrac.invoke_request(SYSTEMS_URI, "GET")
    except HTTPError:
        return SYSTEM_ID, ("Unable to complete the request because the resource URI "
                           "does not exist or is not implemented.")
    first_member = response.json_data.get("Members")[0]
    return first_member.get('@odata.id').split("/")[-1], ""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/__init__.py b/ansible_collections/dellemc/openmanage/plugins/modules/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/__init__.py
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_eventing.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_eventing.py
new file mode 100644
index 000000000..945fd90e9
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_eventing.py
@@ -0,0 +1,342 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 6.0.0
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: dellemc_configure_idrac_eventing
+short_description: Configures the iDRAC eventing related attributes
+version_added: "1.0.0"
+deprecated:
+ removed_at_date: "2024-07-31"
+ why: Replaced with M(dellemc.openmanage.idrac_attributes).
+ alternative: Use M(dellemc.openmanage.idrac_attributes) instead.
+ removed_from_collection: dellemc.openmanage
+description:
+ - This module allows to configure the iDRAC eventing related attributes.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ share_name:
+ type: str
+ description:
+ - (deprecated)Network share or a local path.
+ - This option is deprecated and will be removed in the later version.
+ share_user:
+ type: str
+ description:
+ - (deprecated)Network share user in the format 'user@domain' or 'domain\\user' if user is
+ part of a domain else 'user'. This option is mandatory for CIFS Network Share.
+ - This option is deprecated and will be removed in the later version.
+ share_password:
+ type: str
+ description:
+ - (deprecated)Network share user password. This option is mandatory for CIFS Network Share.
+ - This option is deprecated and will be removed in the later version.
+ aliases: ['share_pwd']
+ share_mnt:
+ type: str
+ description:
+ - (deprecated)Local mount path of the network share with read-write permission for ansible user.
+ This option is mandatory for Network Share.
+ - This option is deprecated and will be removed in the later version.
+ destination_number:
+ type: int
+ description: Destination number for SNMP Trap.
+ destination:
+ type: str
+ description: Destination for SNMP Trap.
+ snmp_v3_username:
+ type: str
+ description: SNMP v3 username for SNMP Trap.
+ snmp_trap_state:
+ type: str
+ description: Whether to Enable or Disable SNMP alert.
+ choices: [Enabled, Disabled]
+ email_alert_state:
+ type: str
+ description: Whether to Enable or Disable Email alert.
+ choices: [Enabled, Disabled]
+ alert_number:
+ type: int
+ description: Alert number for Email configuration.
+ address:
+ type: str
+ description: Email address for SNMP Trap.
+ custom_message:
+ type: str
+ description: Custom message for SNMP Trap reference.
+ enable_alerts:
+ type: str
+ description: Whether to Enable or Disable iDRAC alerts.
+ choices: [Enabled, Disabled]
+ authentication:
+ type: str
+ description: Simple Mail Transfer Protocol Authentication.
+ choices: [Enabled, Disabled]
+ smtp_ip_address:
+ type: str
+ description: SMTP IP address for communication.
+ smtp_port:
+ type: str
+ description: SMTP Port number for access.
+ username:
+ type: str
+ description: Username for SMTP authentication.
+ password:
+ type: str
+ description: Password for SMTP authentication.
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author: "Felix Stephen (@felixs88)"
+notes:
+ - This module requires 'Administrator' privilege for I(idrac_user).
+ - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Configure the iDRAC eventing attributes
+ dellemc.openmanage.dellemc_configure_idrac_eventing:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ destination_number: "2"
+ destination: "1.1.1.1"
+ snmp_v3_username: "None"
+ snmp_trap_state: "Enabled"
+ email_alert_state: "Disabled"
+ alert_number: "1"
+ address: "alert_email@company.com"
+ custom_message: "Custom Message"
+ enable_alerts: "Disabled"
+ authentication: "Enabled"
+ smtp_ip_address: "192.168.0.1"
+ smtp_port: "25"
+ username: "username"
+ password: "password"
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Successfully configured the iDRAC eventing settings.
+ returned: always
+ type: str
+ sample: Successfully configured the iDRAC eventing settings.
+eventing_status:
+ description: Configures the iDRAC eventing attributes.
+ returned: success
+ type: dict
+ sample: {
+ "CompletionTime": "2020-04-02T02:43:28",
+ "Description": "Job Instance",
+ "EndTime": null,
+ "Id": "JID_12345123456",
+ "JobState": "Completed",
+ "JobType": "ImportConfiguration",
+ "Message": "Successfully imported and applied Server Configuration Profile.",
+ "MessageArgs": [],
+ "MessageId": "SYS053",
+ "Name": "Import Configuration",
+ "PercentComplete": 100,
+ "StartTime": "TIME_NOW",
+ "Status": "Success",
+ "TargetSettingsURI": null,
+ "retval": true
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import os
+import tempfile
+import json
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+try:
+ from omdrivers.enums.iDRAC.iDRAC import (State_SNMPAlertTypes, Enable_EmailAlertTypes,
+ AlertEnable_IPMILanTypes,
+ SMTPAuthentication_RemoteHostsTypes)
+ from omsdk.sdkfile import file_share_manager
+ from omsdk.sdkcreds import UserCredentials
+except ImportError:
+ pass
+
+
def run_idrac_eventing_config(idrac, module):
    """
    Configure the iDRAC eventing attributes (SNMP traps, email alerts, SMTP).

    Stages each requested setting through the omsdk config manager, then either
    reports applicability (check mode) or applies the changes.

    Keyword arguments:
    idrac -- iDRAC handle
    module -- Ansible module
    """
    idrac.use_redfish = True
    # A local temp directory is used as the liaison share for staging the SCP.
    share_path = tempfile.gettempdir() + os.sep
    upd_share = file_share_manager.create_share_obj(share_path=share_path, isFolder=True)
    if not upd_share.IsValid:
        module.fail_json(msg="Unable to access the share. Ensure that the share name, "
                             "share mount, and share credentials provided are correct.")
    set_liason = idrac.config_mgr.set_liason_share(upd_share)
    if set_liason['Status'] == "Failed":
        try:
            message = set_liason['Data']['Message']
        except (IndexError, KeyError):
            message = set_liason['Message']
        module.fail_json(msg=message)

    # SNMP trap destination settings apply only when a destination number is given.
    if module.params["destination_number"] is not None:
        if module.params["destination"] is not None:
            idrac.config_mgr.configure_snmp_trap_destination(
                destination=module.params["destination"],
                destination_number=module.params["destination_number"]
            )
        if module.params["snmp_v3_username"] is not None:
            idrac.config_mgr.configure_snmp_trap_destination(
                snmp_v3_username=module.params["snmp_v3_username"],
                destination_number=module.params["destination_number"]
            )
        if module.params["snmp_trap_state"] is not None:
            idrac.config_mgr.configure_snmp_trap_destination(
                state=State_SNMPAlertTypes[module.params["snmp_trap_state"]],
                destination_number=module.params["destination_number"]
            )

    # Email alert settings apply only when an alert number is given.
    if module.params["alert_number"] is not None:
        if module.params["email_alert_state"] is not None:
            idrac.config_mgr.configure_email_alerts(
                state=Enable_EmailAlertTypes[module.params["email_alert_state"]],
                alert_number=module.params["alert_number"]
            )
        if module.params["address"] is not None:
            idrac.config_mgr.configure_email_alerts(
                address=module.params["address"],
                alert_number=module.params["alert_number"]
            )
        if module.params["custom_message"] is not None:
            idrac.config_mgr.configure_email_alerts(
                custom_message=module.params["custom_message"],
                alert_number=module.params["alert_number"]
            )

    # Global iDRAC alert enablement.
    if module.params["enable_alerts"] is not None:
        idrac.config_mgr.configure_idrac_alerts(
            enable_alerts=AlertEnable_IPMILanTypes[module.params["enable_alerts"]],
        )

    # SMTP server settings, staged one attribute at a time.
    if module.params['authentication'] is not None:
        idrac.config_mgr.configure_smtp_server_settings(
            authentication=SMTPAuthentication_RemoteHostsTypes[module.params['authentication']])
    if module.params['smtp_ip_address'] is not None:
        idrac.config_mgr.configure_smtp_server_settings(
            smtp_ip_address=module.params['smtp_ip_address'])
    if module.params['smtp_port'] is not None:
        idrac.config_mgr.configure_smtp_server_settings(
            smtp_port=module.params['smtp_port'])
    if module.params['username'] is not None:
        idrac.config_mgr.configure_smtp_server_settings(
            username=module.params['username'])
    if module.params['password'] is not None:
        idrac.config_mgr.configure_smtp_server_settings(
            password=module.params['password'])

    if module.check_mode:
        # Check mode exits here; staged changes are only evaluated, not applied.
        status = idrac.config_mgr.is_change_applicable()
        if status.get("changes_applicable"):
            module.exit_json(msg="Changes found to commit!", changed=True)
        else:
            module.exit_json(msg="No changes found to commit!")
    else:
        status = idrac.config_mgr.apply_changes(reboot=False)

    return status
+
+
def main():
    """Module entry point: build the argument spec, run the eventing
    configuration, and translate the omsdk status into module results."""
    specs = dict(
        share_name=dict(required=False, type='str'),
        share_password=dict(required=False, type='str', aliases=['share_pwd'], no_log=True),
        share_user=dict(required=False, type='str'),
        share_mnt=dict(required=False, type='str'),
        # setup SNMP Trap Destination
        destination_number=dict(required=False, type="int"),
        destination=dict(required=False, type="str"),
        snmp_v3_username=dict(required=False, type="str"),
        snmp_trap_state=dict(required=False, choices=["Enabled", "Disabled"], default=None),
        # setup Email Alerts
        alert_number=dict(required=False, type="int"),
        address=dict(required=False, default=None, type="str"),
        custom_message=dict(required=False, default=None, type="str"),
        email_alert_state=dict(required=False, choices=["Enabled", "Disabled"], default=None),
        # setup iDRAC Alerts
        enable_alerts=dict(required=False, choices=["Enabled", "Disabled"], default=None),
        # setup SMTP
        authentication=dict(required=False, choices=['Enabled', 'Disabled'], default=None),
        smtp_ip_address=dict(required=False, default=None, type='str'),
        smtp_port=dict(required=False, type='str'),
        username=dict(required=False, type="str"),
        password=dict(required=False, type="str", no_log=True),
    )
    specs.update(idrac_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        supports_check_mode=True)

    try:
        with iDRACConnection(module.params) as idrac:
            status = run_idrac_eventing_config(idrac, module)
            msg, changed = "Successfully configured the iDRAC eventing settings.", True
            if status.get('Status') == "Success":
                # Guard against a missing 'Message' key: the original
                # `"..." in status.get('Message')` raised TypeError when the
                # status dict carried no Message.
                message = status.get('Message') or ""
                if (message == "No changes found to commit!") or \
                        ("No changes were applied" in message):
                    msg = message
                    changed = False
            elif status.get('Status') == "Failed":
                module.fail_json(msg="Failed to configure the iDRAC eventing settings")
            module.exit_json(msg=msg, eventing_status=status, changed=changed)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (RuntimeError, ImportError, SSLValidationError, IOError, ValueError, TypeError, ConnectionError) as e:
        module.fail_json(msg=str(e))
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py
new file mode 100644
index 000000000..5a0eacf1b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py
@@ -0,0 +1,394 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 6.0.0
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: dellemc_configure_idrac_services
+short_description: Configures the iDRAC services related attributes
+version_added: "1.0.0"
+deprecated:
+ removed_at_date: "2024-07-31"
+ why: Replaced with M(dellemc.openmanage.idrac_attributes).
+ alternative: Use M(dellemc.openmanage.idrac_attributes) instead.
+ removed_from_collection: dellemc.openmanage
+description:
+ - This module allows to configure the iDRAC services related attributes.
+options:
+ idrac_ip:
+ required: True
+ type: str
+ description: iDRAC IP Address.
+ idrac_user:
+ required: True
+ type: str
+ description: iDRAC username.
+ idrac_password:
+ required: True
+ type: str
+ description: iDRAC user password.
+ aliases: ['idrac_pwd']
+ idrac_port:
+ type: int
+ description: iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(False), the SSL certificates will not be validated.
+ - Configure C(False) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ type: bool
+ default: True
+ version_added: 5.0.0
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ version_added: 5.0.0
+ share_name:
+ type: str
+ description:
+ - (deprecated)Network share or a local path.
+ - This option is deprecated and will be removed in the later version.
+ share_user:
+ type: str
+ description:
+ - (deprecated)Network share user in the format 'user@domain' or 'domain\\user' if user is
+ part of a domain else 'user'. This option is mandatory for CIFS Network Share.
+ - This option is deprecated and will be removed in the later version.
+ share_password:
+ type: str
+ description:
+ - (deprecated)Network share user password. This option is mandatory for CIFS Network Share.
+ - This option is deprecated and will be removed in the later version.
+ aliases: ['share_pwd']
+ share_mnt:
+ type: str
+ description:
+ - (deprecated)Local mount path of the network share with read-write permission for ansible user.
+ This option is mandatory for Network Share.
+ - This option is deprecated and will be removed in the later version.
+ enable_web_server:
+ type: str
+ description: Whether to Enable or Disable webserver configuration for iDRAC.
+ choices: [Enabled, Disabled]
+ ssl_encryption:
+ type: str
+ description: Secure Socket Layer encryption for webserver.
+ choices: [Auto_Negotiate, T_128_Bit_or_higher, T_168_Bit_or_higher, T_256_Bit_or_higher]
+ tls_protocol:
+ type: str
+ description: Transport Layer Security for webserver.
+ choices: [TLS_1_0_and_Higher, TLS_1_1_and_Higher, TLS_1_2_Only]
+ https_port:
+ type: int
+ description: HTTPS access port.
+ http_port:
+ type: int
+ description: HTTP access port.
+ timeout:
+ type: str
+ description: Timeout value.
+ snmp_enable:
+ type: str
+ description: Whether to Enable or Disable SNMP protocol for iDRAC.
+ choices: [Enabled, Disabled]
+ snmp_protocol:
+ type: str
+ description: Type of the SNMP protocol.
+ choices: [All, SNMPv3]
+ community_name:
+ type: str
+ description: SNMP community name for iDRAC. It is used by iDRAC to validate SNMP queries
+ received from remote systems requesting SNMP data access.
+ alert_port:
+ type: int
+ description: The iDRAC port number that must be used for SNMP traps.
+      The default value is 162, and the acceptable range is between 1 and 65535.
+ default: 162
+ discovery_port:
+ type: int
+ description: The SNMP agent port on the iDRAC. The default value is 161,
+      and the acceptable range is between 1 and 65535.
+ default: 161
+ trap_format:
+ type: str
+ description: SNMP trap format for iDRAC.
+ choices: [SNMPv1, SNMPv2, SNMPv3]
+ ipmi_lan:
+ type: dict
+ description: Community name set on iDRAC for SNMP settings.
+ suboptions:
+ community_name:
+ type: str
+ description: This option is used by iDRAC when it sends out SNMP and IPMI traps.
+ The community name is checked by the remote system to which the traps are sent.
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author: "Felix Stephen (@felixs88)"
+notes:
+ - This module requires 'Administrator' privilege for I(idrac_user).
+ - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Configure the iDRAC services attributes
+ dellemc.openmanage.dellemc_configure_idrac_services:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_web_server: "Enabled"
+ http_port: 80
+ https_port: 443
+ ssl_encryption: "Auto_Negotiate"
+ tls_protocol: "TLS_1_2_Only"
+ timeout: "1800"
+ snmp_enable: "Enabled"
+ snmp_protocol: "SNMPv3"
+ community_name: "public"
+ alert_port: 162
+ discovery_port: 161
+ trap_format: "SNMPv3"
+ ipmi_lan:
+ community_name: "public"
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of iDRAC service attributes configuration.
+ returned: always
+ type: str
+ sample: Successfully configured the iDRAC services settings.
+service_status:
+ description: Details of iDRAC services attributes configuration.
+ returned: success
+ type: dict
+ sample: {
+ "CompletionTime": "2020-04-02T02:43:28",
+ "Description": "Job Instance",
+ "EndTime": null,
+ "Id": "JID_12345123456",
+ "JobState": "Completed",
+ "JobType": "ImportConfiguration",
+ "Message": "Successfully imported and applied Server Configuration Profile.",
+ "MessageArgs": [],
+ "MessageId": "SYS053",
+ "Name": "Import Configuration",
+ "PercentComplete": 100,
+ "StartTime": "TIME_NOW",
+ "Status": "Success",
+ "TargetSettingsURI": null,
+ "retval": true
+}
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import os
+import tempfile
+import json
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+
+try:
+ from omdrivers.enums.iDRAC.iDRAC import (Enable_WebServerTypes,
+ SSLEncryptionBitLength_WebServerTypes,
+ TLSProtocol_WebServerTypes,
+ AgentEnable_SNMPTypes,
+ SNMPProtocol_SNMPTypes)
+ from omsdk.sdkfile import file_share_manager
+ from omsdk.sdkcreds import UserCredentials
+except ImportError:
+ pass
+
+
+def run_idrac_services_config(idrac, module):
+ """
+    Configure the iDRAC services related attributes.
+
+ Keyword arguments:
+ idrac -- iDRAC handle
+ module -- Ansible module
+ """
+ idrac.use_redfish = True
+ share_path = tempfile.gettempdir() + os.sep
+ upd_share = file_share_manager.create_share_obj(share_path=share_path, isFolder=True)
+ if not upd_share.IsValid:
+ module.fail_json(msg="Unable to access the share. Ensure that the share name, "
+ "share mount, and share credentials provided are correct.")
+ set_liason = idrac.config_mgr.set_liason_share(upd_share)
+ if set_liason['Status'] == "Failed":
+ try:
+ message = set_liason['Data']['Message']
+ except (IndexError, KeyError):
+ message = set_liason['Message']
+ module.fail_json(msg=message)
+
+ if module.params['enable_web_server'] is not None:
+ idrac.config_mgr.configure_web_server(
+ enable_web_server=Enable_WebServerTypes[module.params['enable_web_server']]
+ )
+ if module.params['http_port'] is not None:
+ idrac.config_mgr.configure_web_server(
+ http_port=module.params['http_port']
+ )
+ if module.params['https_port'] is not None:
+ idrac.config_mgr.configure_web_server(
+ https_port=module.params['https_port']
+ )
+ if module.params['timeout'] is not None:
+ idrac.config_mgr.configure_web_server(
+ timeout=module.params['timeout']
+ )
+ if module.params['ssl_encryption'] is not None:
+ idrac.config_mgr.configure_web_server(
+ ssl_encryption=SSLEncryptionBitLength_WebServerTypes[module.params['ssl_encryption']]
+ )
+ if module.params['tls_protocol'] is not None:
+ idrac.config_mgr.configure_web_server(
+ tls_protocol=TLSProtocol_WebServerTypes[module.params['tls_protocol']]
+ )
+
+ if module.params['snmp_enable'] is not None:
+ idrac.config_mgr.configure_snmp(
+ snmp_enable=AgentEnable_SNMPTypes[module.params['snmp_enable']]
+ )
+ if module.params['community_name'] is not None:
+ idrac.config_mgr.configure_snmp(
+ community_name=module.params['community_name']
+ )
+ if module.params['snmp_protocol'] is not None:
+ idrac.config_mgr.configure_snmp(
+ snmp_protocol=SNMPProtocol_SNMPTypes[module.params['snmp_protocol']]
+ )
+ if module.params['alert_port'] is not None:
+ idrac.config_mgr.configure_snmp(
+ alert_port=module.params['alert_port']
+ )
+ if module.params['discovery_port'] is not None:
+ idrac.config_mgr.configure_snmp(
+ discovery_port=module.params['discovery_port']
+ )
+ if module.params['trap_format'] is not None:
+ idrac.config_mgr.configure_snmp(
+ trap_format=module.params['trap_format']
+ )
+ if module.params['ipmi_lan'] is not None:
+ ipmi_option = module.params.get('ipmi_lan')
+ community_name = ipmi_option.get('community_name')
+ if community_name is not None:
+ idrac.config_mgr.configure_snmp(ipmi_community=community_name)
+
+ if module.check_mode:
+ status = idrac.config_mgr.is_change_applicable()
+ if status.get('changes_applicable'):
+ module.exit_json(msg="Changes found to commit!", changed=True)
+ else:
+ module.exit_json(msg="No changes found to commit!")
+ else:
+ return idrac.config_mgr.apply_changes(reboot=False)
+
+
+# Main
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+
+ # iDRAC credentials
+ idrac_ip=dict(required=True, type='str'),
+ idrac_user=dict(required=True, type='str'),
+ idrac_password=dict(required=True, type='str', aliases=['idrac_pwd'], no_log=True),
+ idrac_port=dict(required=False, default=443, type='int'),
+ validate_certs=dict(type='bool', default=True),
+ ca_path=dict(type='path'),
+ # Export Destination
+ share_name=dict(required=False, type='str'),
+ share_password=dict(required=False, type='str', aliases=['share_pwd'], no_log=True),
+ share_user=dict(required=False, type='str'),
+ share_mnt=dict(required=False, type='str'),
+
+ # setup Webserver
+ enable_web_server=dict(required=False, choices=['Enabled', 'Disabled'], default=None),
+ http_port=dict(required=False, default=None, type='int'),
+ https_port=dict(required=False, default=None, type='int'),
+ ssl_encryption=dict(required=False, choices=['Auto_Negotiate', 'T_128_Bit_or_higher',
+ 'T_168_Bit_or_higher', 'T_256_Bit_or_higher'],
+ default=None),
+ tls_protocol=dict(required=False, choices=['TLS_1_0_and_Higher',
+ 'TLS_1_1_and_Higher', 'TLS_1_2_Only'], default=None),
+ timeout=dict(required=False, default=None, type="str"),
+
+ # set up SNMP settings
+ snmp_enable=dict(required=False, choices=['Enabled', 'Disabled'], default=None),
+ community_name=dict(required=False, type='str'),
+ snmp_protocol=dict(required=False, choices=['All', 'SNMPv3'], default=None),
+ discovery_port=dict(required=False, type="int", default=161),
+
+            # set up IPMI LAN community settings
+ ipmi_lan=dict(required=False, type='dict', options=dict(community_name=dict(required=False, type='str'))),
+ alert_port=dict(required=False, type='int', default=162),
+ trap_format=dict(required=False, choices=['SNMPv1', 'SNMPv2', 'SNMPv3'], default=None),
+
+ ),
+ supports_check_mode=True)
+
+ try:
+ with iDRACConnection(module.params) as idrac:
+ status = run_idrac_services_config(idrac, module)
+ if status.get('Status') == "Success":
+ changed = True
+ msg = "Successfully configured the iDRAC services settings."
+ if status.get('Message') and (status.get('Message') == "No changes found to commit!" or
+ "No changes were applied" in status.get('Message')):
+ msg = status.get('Message')
+ changed = False
+ module.exit_json(msg=msg, service_status=status, changed=changed)
+ else:
+ module.fail_json(msg="Failed to configure the iDRAC services.")
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (RuntimeError, ImportError, SSLValidationError, IOError, ValueError, TypeError, ConnectionError) as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_firmware_inventory.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_firmware_inventory.py
new file mode 100644
index 000000000..d667c916e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_firmware_inventory.py
@@ -0,0 +1,148 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: dellemc_get_firmware_inventory
+short_description: Get Firmware Inventory
+version_added: "1.0.0"
+deprecated:
+ removed_at_date: "2023-01-15"
+ why: Replaced with M(dellemc.openmanage.idrac_firmware_info).
+ alternative: Use M(dellemc.openmanage.idrac_firmware_info) instead.
+ removed_from_collection: dellemc.openmanage
+description: Get Firmware Inventory.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author: "Rajeev Arakkal (@rajeevarakkal)"
+notes:
+ - Run this module from a system that has direct access to DellEMC iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Get Installed Firmware Inventory
+ dellemc.openmanage.dellemc_get_firmware_inventory:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+"""
+
+RETURN = """
+ansible_facts:
+ description: Displays components and their firmware versions. Also, list of the firmware
+ dictionaries (one dictionary per firmware).
+ returned: success
+ type: complex
+ sample: {
+ [
+ {
+ "BuildNumber": "0",
+ "Classifications": "10",
+ "ComponentID": "101100",
+ "ComponentType": "FRMW",
+ "DeviceID": null,
+ "ElementName": "Power Supply.Slot.1",
+ "FQDD": "PSU.Slot.1",
+ "IdentityInfoType": "OrgID:ComponentType:ComponentID",
+ "IdentityInfoValue": "DCIM:firmware:101100",
+ "InstallationDate": "2018-01-18T07:25:08Z",
+ "InstanceID": "DCIM:INSTALLED#0x15__PSU.Slot.1",
+ "IsEntity": "true",
+ "Key": "DCIM:INSTALLED#0x15__PSU.Slot.1",
+ "MajorVersion": "0",
+ "MinorVersion": "1",
+ "RevisionNumber": "7",
+ "RevisionString": null,
+ "Status": "Installed",
+ "SubDeviceID": null,
+ "SubVendorID": null,
+ "Updateable": "true",
+ "VendorID": null,
+ "VersionString": "00.1D.7D",
+ "impactsTPMmeasurements": "false"
+ }
+ ]
+ }
+"""
+
+
+import traceback
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from omsdk.sdkfile import LocalFile
+ from omsdk.catalog.sdkupdatemgr import UpdateManager
+ from omdrivers.helpers.iDRAC.UpdateHelper import UpdateHelper
+ HAS_OMSDK = True
+except ImportError:
+ HAS_OMSDK = False
+
+
+def run_get_firmware_inventory(idrac, module):
+ """
+ Get Firmware Inventory
+ Keyword arguments:
+ idrac -- iDRAC handle
+ module -- Ansible module
+ """
+
+ msg = {}
+ # msg['changed'] = False
+ msg['failed'] = False
+ msg['msg'] = {}
+ error = False
+
+ try:
+ # idrac.use_redfish = True
+ msg['msg'] = idrac.update_mgr.InstalledFirmware
+ if "Status" in msg['msg']:
+ if msg['msg']['Status'] != "Success":
+ msg['failed'] = True
+
+ except Exception as err:
+ error = True
+ msg['msg'] = "Error: %s" % str(err)
+ msg['exception'] = traceback.format_exc()
+ msg['failed'] = True
+
+ return msg, error
+
+
+# Main
+def main():
+ module = AnsibleModule(
+ argument_spec=idrac_auth_params,
+ supports_check_mode=True)
+
+ try:
+ with iDRACConnection(module.params) as idrac:
+ msg, err = run_get_firmware_inventory(idrac, module)
+ except (ImportError, ValueError, RuntimeError) as e:
+ module.fail_json(msg=str(e))
+
+ if err:
+ module.fail_json(**msg)
+ module.exit_json(ansible_facts={idrac.ipaddr: {'Firmware Inventory': msg['msg']}})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_system_inventory.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_system_inventory.py
new file mode 100644
index 000000000..e6a2d9eaf
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_system_inventory.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: dellemc_get_system_inventory
+short_description: Get the PowerEdge Server System Inventory
+version_added: "1.0.0"
+deprecated:
+ removed_at_date: "2023-01-15"
+ why: Replaced with M(dellemc.openmanage.idrac_system_info).
+ alternative: Use M(dellemc.openmanage.idrac_system_info) instead.
+ removed_from_collection: dellemc.openmanage
+description:
+ - Get the PowerEdge Server System Inventory.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author: "Rajeev Arakkal (@rajeevarakkal)"
+notes:
+ - Run this module from a system that has direct access to DellEMC iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Get System Inventory
+ dellemc.openmanage.dellemc_get_system_inventory:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+"""
+
+RETURN = """
+ansible_facts:
+ description: Displays the Dell EMC PowerEdge Server System Inventory.
+ returned: success
+ type: complex
+ sample: {
+ "SystemInventory": {
+ "BIOS": [
+ {
+ "BIOSReleaseDate": "10/19/2017",
+ "FQDD": "BIOS.Setup.1-1",
+ "InstanceID": "DCIM:INSTALLED#741__BIOS.Setup.00",
+ "Key": "DCIM:INSTALLED#741__BIOS.Setup.00",
+ "SMBIOSPresent": "True",
+ "VersionString": "1.2.11"
+ }
+ ],
+ "CPU": [
+ {
+ "CPUFamily": "Intel(R) Xeon(TM)",
+ "Characteristics": "64-bit capable",
+ "CurrentClockSpeed": "2.3 GHz",
+ "DeviceDescription": "CPU 1",
+ "ExecuteDisabledCapable": "Yes",
+ }
+ ]
+ }
+}
+msg:
+ description: Details of the Error occurred.
+ returned: on error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+"""
+
+
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+
+
+# Get System Inventory
+def run_get_system_inventory(idrac, module):
+ msg = {}
+ msg['changed'] = False
+ msg['failed'] = False
+ err = False
+
+ try:
+ # idrac.use_redfish = True
+ idrac.get_entityjson()
+ msg['msg'] = idrac.get_json_device()
+ except Exception as e:
+ err = True
+ msg['msg'] = "Error: %s" % str(e)
+ msg['failed'] = True
+ return msg, err
+
+
+# Main
+def main():
+ module = AnsibleModule(
+ argument_spec=idrac_auth_params,
+ supports_check_mode=True)
+
+ try:
+ with iDRACConnection(module.params) as idrac:
+ msg, err = run_get_system_inventory(idrac, module)
+ except (ImportError, ValueError, RuntimeError) as e:
+ module.fail_json(msg=str(e))
+
+ if err:
+ module.fail_json(**msg)
+ module.exit_json(ansible_facts={idrac.ipaddr: {'SystemInventory': msg['msg']}})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py
new file mode 100644
index 000000000..eec09c1c8
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 6.0.0
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: dellemc_idrac_lc_attributes
+short_description: Enable or disable Collect System Inventory on Restart (CSIOR) property for all iDRAC/LC jobs
+version_added: "1.0.0"
+deprecated:
+ removed_at_date: "2024-07-31"
+ why: Replaced with M(dellemc.openmanage.idrac_attributes).
+ alternative: Use M(dellemc.openmanage.idrac_attributes) instead.
+ removed_from_collection: dellemc.openmanage
+description:
+  - This module is responsible for enabling or disabling the Collect System Inventory on Restart (CSIOR)
+ property for all iDRAC/LC jobs.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ share_name:
+ type: str
+ description:
+ - (deprecated)Network share or a local path.
+ - This option is deprecated and will be removed in the later version.
+ share_user:
+ type: str
+ description:
+ - (deprecated)Network share user in the format 'user@domain' or 'domain\\user' if user is
+ part of a domain else 'user'. This option is mandatory for CIFS Network Share.
+ - This option is deprecated and will be removed in the later version.
+ share_password:
+ type: str
+ description:
+ - (deprecated)Network share user password. This option is mandatory for CIFS Network Share.
+ - This option is deprecated and will be removed in the later version.
+ aliases: ['share_pwd']
+ share_mnt:
+ type: str
+ description:
+ - (deprecated)Local mount path of the network share with read-write permission for ansible user.
+ This option is mandatory for Network Share.
+ - This option is deprecated and will be removed in the later version.
+ csior:
+ type: str
+ description: Whether to Enable or Disable Collect System Inventory on Restart (CSIOR)
+ property for all iDRAC/LC jobs.
+ choices: [Enabled, Disabled]
+ default: Enabled
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author: "Felix Stephen (@felixs88)"
+notes:
+ - This module requires 'Administrator' privilege for I(idrac_user).
+ - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Set up iDRAC LC Attributes
+ dellemc.openmanage.dellemc_idrac_lc_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ csior: "Enabled"
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of iDRAC LC attributes configuration.
+ returned: always
+ type: str
+ sample: Successfully configured the iDRAC LC attributes.
+lc_attribute_status:
+ description: Collect System Inventory on Restart (CSIOR) property for all iDRAC/LC jobs is configured.
+ returned: success
+ type: dict
+ sample: {
+ "CompletionTime": "2020-03-30T00:06:53",
+ "Description": "Job Instance",
+ "EndTime": null,
+ "Id": "JID_1234512345",
+ "JobState": "Completed",
+ "JobType": "ImportConfiguration",
+ "Message": "Successfully imported and applied Server Configuration Profile.",
+ "MessageArgs": [],
+ "MessageArgs@odata.count": 0,
+ "MessageId": "SYS053",
+ "Name": "Import Configuration",
+ "PercentComplete": 100,
+ "StartTime": "TIME_NOW",
+ "Status": "Success",
+ "TargetSettingsURI": null,
+ "retval": true
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import os
+import tempfile
+import json
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+
+try:
+ from omsdk.sdkfile import file_share_manager
+ from omsdk.sdkcreds import UserCredentials
+except ImportError:
+ pass
+
+
+# Configure CSIOR (Collect System Inventory on Restart)
+def run_setup_idrac_csior(idrac, module):
+ """
+    Enable or disable the CSIOR property for all iDRAC/LC jobs.
+
+ Keyword arguments:
+ idrac -- iDRAC handle
+ module -- Ansible module
+ """
+ idrac.use_redfish = True
+ share_path = tempfile.gettempdir() + os.sep
+ upd_share = file_share_manager.create_share_obj(share_path=share_path, isFolder=True)
+ if not upd_share.IsValid:
+ module.fail_json(msg="Unable to access the share. Ensure that the share name, "
+ "share mount, and share credentials provided are correct.")
+ set_liason = idrac.config_mgr.set_liason_share(upd_share)
+ if set_liason['Status'] == "Failed":
+ try:
+ message = set_liason['Data']['Message']
+ except (IndexError, KeyError):
+ message = set_liason['Message']
+ module.fail_json(msg=message)
+ if module.params['csior'] == 'Enabled':
+ # Enable csior
+ idrac.config_mgr.enable_csior()
+ elif module.params['csior'] == 'Disabled':
+ # Disable csior
+ idrac.config_mgr.disable_csior()
+
+ if module.check_mode:
+ status = idrac.config_mgr.is_change_applicable()
+ if status.get("changes_applicable"):
+ module.exit_json(msg="Changes found to commit!", changed=True)
+ else:
+ module.exit_json(msg="No changes found to commit!")
+ else:
+ return idrac.config_mgr.apply_changes(reboot=False)
+
+
+# Main
+def main():
+ specs = dict(
+ share_name=dict(required=False, type='str'),
+ share_password=dict(required=False, type='str', aliases=['share_pwd'], no_log=True),
+ share_user=dict(required=False, type='str'),
+ share_mnt=dict(required=False, type='str'),
+ csior=dict(required=False, choices=['Enabled', 'Disabled'], default='Enabled')
+ )
+ specs.update(idrac_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ supports_check_mode=True)
+
+ try:
+ with iDRACConnection(module.params) as idrac:
+ status = run_setup_idrac_csior(idrac, module)
+ if status.get('Status') == "Success":
+ changed = True
+ msg = "Successfully configured the iDRAC LC attributes."
+ if status.get('Message') and (status.get('Message') == "No changes found to commit!" or
+ "No changes were applied" in status.get('Message')):
+ msg = status.get('Message')
+ changed = False
+ module.exit_json(msg=msg, lc_attribute_status=status, changed=changed)
+ else:
+ module.fail_json(msg="Failed to configure the iDRAC LC attributes.")
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (RuntimeError, ImportError, SSLValidationError, IOError, ValueError, TypeError, ConnectionError) as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py
new file mode 100644
index 000000000..01c915eae
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py
@@ -0,0 +1,505 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: dellemc_idrac_storage_volume
+short_description: Configures the RAID configuration attributes
+version_added: "2.0.0"
+description:
+ - This module is responsible for configuring the RAID attributes.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ state:
+ type: str
+ description:
+ - C(create), performs create volume operation.
+ - C(delete), performs remove volume operation.
+ - C(view), returns storage view.
+ choices: ['create', 'delete', 'view']
+ default: 'view'
+ span_depth:
+ type: int
+ description:
+ - Number of spans in the RAID configuration.
+ - I(span_depth) is required for C(create) and its value depends on I(volume_type).
+ default: 1
+ span_length:
+ type: int
+ description:
+ - Number of disks in a span.
+ - I(span_length) is required for C(create) and its value depends on I(volume_type).
+ default: 1
+ number_dedicated_hot_spare:
+ type: int
+ description: Number of Dedicated Hot Spare.
+ default: 0
+ volume_type:
+ type: str
+    description: Provide the required RAID level.
+ choices: ['RAID 0', 'RAID 1', 'RAID 5', 'RAID 6', 'RAID 10', 'RAID 50', 'RAID 60']
+ default: 'RAID 0'
+ disk_cache_policy:
+ type: str
+ description: Disk Cache Policy.
+ choices: ["Default", "Enabled", "Disabled"]
+ default: "Default"
+ write_cache_policy:
+ type: str
+ description: Write cache policy.
+ choices: ["WriteThrough", "WriteBack", "WriteBackForce"]
+ default: "WriteThrough"
+ read_cache_policy:
+ type: str
+ description: Read cache policy.
+ choices: ["NoReadAhead", "ReadAhead", "AdaptiveReadAhead"]
+ default: "NoReadAhead"
+ stripe_size:
+ type: int
+ description: Stripe size value to be provided in multiples of 64 * 1024.
+ default: 65536
+ controller_id:
+ type: str
+ description:
+ - >-
+ Fully Qualified Device Descriptor (FQDD) of the storage controller, for example 'RAID.Integrated.1-1'.
+ Controller FQDD is required for C(create) RAID configuration.
+ media_type:
+ type: str
+ description: Media type.
+ choices: ['HDD', 'SSD']
+ protocol:
+ type: str
+ description: Bus protocol.
+ choices: ['SAS', 'SATA']
+ volume_id:
+ type: str
+ description:
+ - >-
+ Fully Qualified Device Descriptor (FQDD) of the virtual disk, for example 'Disk.virtual.0:RAID.Slot.1-1'.
+ This option is used to get the virtual disk information.
+ volumes:
+ type: list
+ elements: dict
+ description:
+ - >-
+ A list of virtual disk specific iDRAC attributes. This is applicable for C(create) and C(delete) operations.
+ - >-
+ For C(create) operation, name and drives are applicable options, other volume options can also be specified.
+ - >-
+ The drives is a required option for C(create) operation and accepts either location (list of drive slot)
+ or id (list of drive fqdd).
+ - >-
+ For C(delete) operation, only name option is applicable.
+ - See the examples for more details.
+ capacity:
+ type: float
+ description: Virtual disk size in GB.
+ raid_reset_config:
+ type: str
+ description:
+ - >-
+ This option represents whether a reset config operation needs to be performed on the RAID controller.
+ Reset Config operation deletes all the virtual disks present on the RAID controller.
+ choices: ['True', 'False']
+ default: 'False'
+ raid_init_operation:
+ type: str
+ description: This option represents initialization configuration operation to be performed on the virtual disk.
+ choices: [None, Fast]
+
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author: "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to DellEMC iDRAC.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Create single volume
+ dellemc.openmanage.dellemc_idrac_storage_volume:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "create"
+ controller_id: "RAID.Slot.1-1"
+ volumes:
+ - drives:
+ location: [5]
+
+- name: Create multiple volume
+ dellemc.openmanage.dellemc_idrac_storage_volume:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ raid_reset_config: "True"
+ state: "create"
+ controller_id: "RAID.Slot.1-1"
+ volume_type: "RAID 1"
+ span_depth: 1
+ span_length: 2
+ number_dedicated_hot_spare: 1
+ disk_cache_policy: "Enabled"
+ write_cache_policy: "WriteBackForce"
+ read_cache_policy: "ReadAhead"
+ stripe_size: 65536
+ capacity: 100
+ raid_init_operation: "Fast"
+ volumes:
+ - name: "volume_1"
+ drives:
+ id: ["Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1", "Disk.Bay.2:Enclosure.Internal.0-1:RAID.Slot.1-1"]
+ - name: "volume_2"
+ volume_type: "RAID 5"
+ span_length: 3
+ span_depth: 1
+ drives:
+ location: [7,3,5]
+ disk_cache_policy: "Disabled"
+ write_cache_policy: "WriteBack"
+ read_cache_policy: "NoReadAhead"
+ stripe_size: 131072
+ capacity: "200"
+ raid_init_operation: "None"
+
+- name: View all volume details
+ dellemc.openmanage.dellemc_idrac_storage_volume:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "view"
+
+- name: View specific volume details
+ dellemc.openmanage.dellemc_idrac_storage_volume:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "view"
+ controller_id: "RAID.Slot.1-1"
+ volume_id: "Disk.Virtual.0:RAID.Slot.1-1"
+
+- name: Delete single volume
+ dellemc.openmanage.dellemc_idrac_storage_volume:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "delete"
+ volumes:
+ - name: "volume_1"
+
+- name: Delete multiple volume
+ dellemc.openmanage.dellemc_idrac_storage_volume:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "delete"
+ volumes:
+ - name: "volume_1"
+ - name: "volume_2"
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall status of the storage configuration operation.
+ returned: always
+ sample: "Successfully completed the view storage volume operation"
+storage_status:
+ type: dict
+ description: Storage configuration job and progress details from the iDRAC.
+ returned: success
+ sample:
+ {
+ "Id": "JID_XXXXXXXXX",
+ "JobState": "Completed",
+ "JobType": "ImportConfiguration",
+ "Message": "Successfully imported and applied Server Configuration Profile.",
+ "MessageId": "XXX123",
+ "Name": "Import Configuration",
+ "PercentComplete": 100,
+ "StartTime": "TIME_NOW",
+ "Status": "Success",
+ "TargetSettingsURI": null,
+ "retval": true
+ }
+'''
+
+
+import os
+import tempfile
+import copy
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from omdrivers.types.iDRAC.RAID import RAIDactionTypes, RAIDdefaultReadPolicyTypes, RAIDinitOperationTypes, \
+ DiskCachePolicyTypes, RAIDresetConfigTypes
+ from omsdk.sdkfile import file_share_manager
+except ImportError:
+ pass
+
+
def error_handling_for_negative_num(option, val):
    """Return the standard error message for a non-positive numeric option.

    :param option: name of the module option being validated (e.g. 'capacity')
    :param val: the rejected value, echoed back in the message
    :return: formatted error string
    """
    # Fix: the original message lacked a space after the comma ("zero,got").
    return "{0} cannot be a negative number or zero, got {1}".format(option, val)
+
+
def set_liason_share(idrac, module):
    """Point the iDRAC config manager at a local temp directory used as the liason share.

    Fails the module when the OMSDK reports the share could not be set.
    """
    idrac.use_redfish = True
    temp_share_path = tempfile.gettempdir() + os.sep
    share_obj = file_share_manager.create_share_obj(share_path=temp_share_path,
                                                    isFolder=True)
    result = idrac.config_mgr.set_liason_share(share_obj)
    if result['Status'] == "Failed":
        # Prefer the detailed message nested under 'Data' when present.
        details = result.get('Data', result)
        module.fail_json(msg=details.get('Message', "Failed to set Liason share"))
+
+
def view_storage(idrac, module):
    """Fetch the storage inventory, optionally narrowed to one controller/virtual disk.

    :return: the OMSDK view result dict; fails the module on a 'Failed' status.
    """
    idrac.get_entityjson()
    result = idrac.config_mgr.RaidHelper.view_storage(
        controller=module.params["controller_id"],
        virtual_disk=module.params['volume_id'])
    if result['Status'] == 'Failed':
        module.fail_json(msg="Failed to fetch storage details", storage_status=result)
    return result
+
+
def create_storage(idrac, module):
    """Build one payload per requested volume and ask the RAID helper to create them.

    In check mode the changes are evaluated but not applied.
    """
    controller = module.params["controller_id"]
    # Filter matching drives attached to the controller either directly or
    # through an enclosure (parent vs. parent.parent).
    pd_filter = ('((disk.parent.parent is Controller and '
                 'disk.parent.parent.FQDD._value == "{0}")'.format(controller))
    pd_filter += (' or (disk.parent is Controller and '
                  'disk.parent.FQDD._value == "{0}"))'.format(controller))

    volumes = module.params['volumes']
    if volumes is not None:
        # Each volume merges into its own deepcopy so per-volume overrides
        # cannot leak between iterations.
        vd_values = [multiple_vd_config(mod_args=copy.deepcopy(module.params),
                                        each_vd=volume, pd_filter=pd_filter)
                     for volume in volumes]
    else:
        vd_values = [multiple_vd_config(mod_args=module.params, pd_filter=pd_filter)]
    return idrac.config_mgr.RaidHelper.new_virtual_disk(multiple_vd=vd_values,
                                                        apply_changes=not module.check_mode)
+
+
def delete_storage(idrac, module):
    """Delete the virtual disks named in the 'volumes' option (no-op apply in check mode)."""
    vd_names = [volume.get("name") for volume in module.params['volumes']]
    return idrac.config_mgr.RaidHelper.delete_virtual_disk(vd_names=vd_names,
                                                           apply_changes=not module.check_mode)
+
+
+def _validate_options(options):
+ if options['state'] == "create":
+ if options["controller_id"] is None or options["controller_id"] == "":
+ raise ValueError('Controller ID is required.')
+ capacity = options.get("capacity")
+ if capacity is not None:
+ size_check = float(capacity)
+ if size_check <= 0:
+ raise ValueError(error_handling_for_negative_num("capacity", capacity))
+ stripe_size = options.get('stripe_size')
+ if stripe_size is not None:
+ stripe_size_check = int(stripe_size)
+ if stripe_size_check <= 0:
+ raise ValueError(error_handling_for_negative_num("stripe_size", stripe_size))
+ # validating for each vd options
+ if options['volumes'] is not None:
+ for each in options['volumes']:
+ drives = each.get("drives")
+ if drives:
+ if "id" in drives and "location" in drives:
+ raise ValueError("Either {0} or {1} is allowed".format("id", "location"))
+ elif "id" not in drives and "location" not in drives:
+ raise ValueError("Either {0} or {1} should be specified".format("id", "location"))
+ else:
+ raise ValueError("Drives must be defined for volume creation.")
+ capacity = each.get("capacity")
+ if capacity is not None:
+ size_check = float(capacity)
+ if size_check <= 0:
+ raise ValueError(error_handling_for_negative_num("capacity", capacity))
+ stripe_size = each.get('stripe_size')
+ if stripe_size is not None:
+ stripe_size_check = int(stripe_size)
+ if stripe_size_check <= 0:
+ raise ValueError(error_handling_for_negative_num("stripe_size", stripe_size))
+ elif options['state'] == "delete":
+ message = "Virtual disk name is a required parameter for remove virtual disk operations."
+ if options['volumes'] is None or None in options['volumes']:
+ raise ValueError(message)
+ elif options['volumes']:
+ if not all("name" in each for each in options['volumes']):
+ raise ValueError(message)
+
+
def multiple_vd_config(mod_args=None, pd_filter="", each_vd=None):
    """Build a single virtual-disk definition dict for RaidHelper.new_virtual_disk.

    :param mod_args: module-level defaults (module.params); per-volume keys in
        *each_vd* override these. NOTE(review): this mutates mod_args, so the
        caller passes a deepcopy when looping over several volumes.
    :param pd_filter: base physical-disk filter expression for the controller.
    :param each_vd: one entry of the 'volumes' option, or None/{} when only
        the module-level options apply.
    :return: dict of OMSDK RAID attributes describing one virtual disk.
    """
    if mod_args is None:
        mod_args = {}
    if each_vd is None:
        each_vd = {}
    if each_vd:
        # Per-volume options take precedence over the module-level defaults.
        mod_args.update(each_vd)
    disk_size = None
    location_list = []
    id_list = []
    size = mod_args.get("capacity")
    drives = mod_args.get("drives")
    if drives:
        # 'location' (slot numbers) and 'id' (drive FQDDs) are mutually
        # exclusive; _validate_options has already rejected both-at-once.
        if "location" in drives:
            location_list = drives.get("location")
        elif "id" in drives:
            id_list = drives.get("id")
    if size is not None:
        size_check = float(size)
        # Convert GB to bytes (1 GB = 1073741824 bytes); OMSDK expects a string.
        disk_size = "{0}".format(int(size_check * 1073741824))

    # Narrow the disk filter by media type / bus protocol when requested.
    if mod_args['media_type'] is not None:
        pd_filter += ' and disk.MediaType == "{0}"'.format(mod_args['media_type'])
    if mod_args["protocol"] is not None:
        pd_filter += ' and disk.BusProtocol == "{0}"'.format(mod_args["protocol"])
    pd_selection = pd_filter

    if location_list:
        # Build a quoted, bracketed slot list, e.g. ["5","7"], for the filter DSL.
        slots = ""
        for i in location_list:
            slots += "\"" + str(i) + "\","
        slots_list = "[" + slots[0:-1] + "]"
        pd_selection += " and disk.Slot._value in " + slots_list
    elif id_list:
        pd_selection += " and disk.FQDD._value in " + str(id_list)

    # Map the string option values onto OMSDK enum members ('None'/'False' are
    # Python reserved words, hence the T_None/T_False member names).
    raid_init_operation, raid_reset_config = "None", "False"
    if mod_args['raid_init_operation'] == "None":
        raid_init_operation = RAIDinitOperationTypes.T_None
    if mod_args['raid_init_operation'] == "Fast":
        raid_init_operation = RAIDinitOperationTypes.Fast

    if mod_args['raid_reset_config'] == "False":
        raid_reset_config = RAIDresetConfigTypes.T_False
    if mod_args['raid_reset_config'] == "True":
        raid_reset_config = RAIDresetConfigTypes.T_True

    vd_value = dict(
        Name=mod_args.get("name"),
        SpanDepth=int(mod_args['span_depth']),
        SpanLength=int(mod_args['span_length']),
        NumberDedicatedHotSpare=int(mod_args['number_dedicated_hot_spare']),
        RAIDTypes=mod_args["volume_type"],
        DiskCachePolicy=DiskCachePolicyTypes[mod_args['disk_cache_policy']],
        RAIDdefaultWritePolicy=mod_args['write_cache_policy'],
        RAIDdefaultReadPolicy=RAIDdefaultReadPolicyTypes[mod_args['read_cache_policy']],
        StripeSize=int(mod_args['stripe_size']),
        RAIDforeignConfig="Clear",
        RAIDaction=RAIDactionTypes.Create,
        PhysicalDiskFilter=pd_selection,
        Size=disk_size,
        RAIDresetConfig=raid_reset_config,
        RAIDinitOperation=raid_init_operation,
        PDSlots=location_list,
        ControllerFQDD=mod_args.get("controller_id"),
        mediatype=mod_args['media_type'],
        busprotocol=mod_args["protocol"],
        FQDD=id_list
    )
    return vd_value
+
+
def run_server_raid_config(idrac, module):
    """Dispatch the requested storage operation ('view'/'create'/'delete')."""
    state = module.params['state']
    if state == "view":
        storage_status = view_storage(idrac, module)
    elif state == "create":
        # Write operations go through the local liason share.
        set_liason_share(idrac, module)
        storage_status = create_storage(idrac, module)
    elif state == "delete":
        set_liason_share(idrac, module)
        storage_status = delete_storage(idrac, module)
    return storage_status
+
+
def main():
    """Module entry point: build the argument spec, validate the options, and
    run the requested storage operation against the iDRAC."""
    specs = {
        "state": {"required": False, "choices": ['create', 'delete', 'view'], "default": 'view'},
        "volume_id": {"required": False, "type": 'str'},
        "volumes": {"required": False, "type": 'list', "elements": 'dict'},
        "span_depth": {"required": False, "type": 'int', "default": 1},
        "span_length": {"required": False, "type": 'int', "default": 1},
        "number_dedicated_hot_spare": {"required": False, "type": 'int', "default": 0},
        "volume_type": {"required": False,
                        "choices": ['RAID 0', 'RAID 1', 'RAID 5', 'RAID 6', 'RAID 10', 'RAID 50', 'RAID 60'],
                        "default": 'RAID 0'},
        "disk_cache_policy": {"required": False, "choices": ["Default", "Enabled", "Disabled"],
                              "default": "Default"},
        "write_cache_policy": {"required": False, "choices": ["WriteThrough", "WriteBack", "WriteBackForce"],
                               "default": "WriteThrough"},
        "read_cache_policy": {"required": False, "choices": ["NoReadAhead", "ReadAhead", "AdaptiveReadAhead"],
                              "default": "NoReadAhead"},
        "stripe_size": {"required": False, "type": 'int', "default": 64 * 1024},
        "capacity": {"required": False, "type": 'float'},
        "controller_id": {"required": False, "type": 'str'},
        "media_type": {"required": False, "choices": ['HDD', 'SSD']},
        "protocol": {"required": False, "choices": ['SAS', 'SATA']},
        "raid_reset_config": {"required": False, "choices": ['True', 'False'], "default": 'False'},
        "raid_init_operation": {"required": False, "choices": ['None', 'Fast']}
    }
    specs.update(idrac_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        supports_check_mode=True)

    try:
        _validate_options(module.params)
        with iDRACConnection(module.params) as idrac:
            storage_status = run_server_raid_config(idrac, module)
            changed = False
            # In check mode OMSDK reports whether a change would be applied.
            if 'changes_applicable' in storage_status:
                changed = storage_status['changes_applicable']
            elif module.params['state'] != 'view':
                if storage_status.get("Status", "") == "Success":
                    changed = True
                    # Success with a "no changes"/"not found" message must not be reported as changed.
                    if storage_status.get("Message", "") == "No changes found to commit!" \
                            or storage_status.get("Message", "") == "Unable to find the virtual disk":
                        changed = False
                    module.exit_json(msg=storage_status.get('Message', ""),
                                     changed=changed, storage_status=storage_status)
                elif storage_status.get("Status") == "Failed":
                    module.fail_json(msg=storage_status.get("Message"))
                else:
                    module.fail_json(msg="Failed to perform storage operation")
    except (ImportError, ValueError, RuntimeError, TypeError) as e:
        module.fail_json(msg=str(e))
    # Reached only for 'view' and for check-mode runs; the non-view apply
    # paths above exit inside the try block via exit_json/fail_json.
    msg = "Successfully completed the {0} storage volume operation".format(module.params['state'])
    if module.check_mode and module.params['state'] != 'view':
        msg = storage_status.get("Message", "")
    module.exit_json(msg=msg, changed=changed, storage_status=storage_status)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_system_lockdown_mode.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_system_lockdown_mode.py
new file mode 100644
index 000000000..3be038e44
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_system_lockdown_mode.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 6.0.0
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: dellemc_system_lockdown_mode
+short_description: Configures system lockdown mode for iDRAC
+version_added: "1.0.0"
+deprecated:
+ removed_at_date: "2024-07-31"
+ why: Replaced with M(dellemc.openmanage.idrac_attributes).
+ alternative: Use M(dellemc.openmanage.idrac_attributes) instead.
+ removed_from_collection: dellemc.openmanage
+description:
+  - This module allows you to Enable or Disable the System lockdown Mode.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ share_name:
+ type: str
+ description:
+      - (deprecated) Network share or a local path.
+ - This option is deprecated and will be removed in the later version.
+ share_user:
+ type: str
+ description:
+      - (deprecated) Network share user in the format 'user@domain' or 'domain\\user' if user is
+ part of a domain else 'user'. This option is mandatory for CIFS Network Share.
+ - This option is deprecated and will be removed in the later version.
+ share_password:
+ type: str
+ description:
+      - (deprecated) Network share user password. This option is mandatory for CIFS Network Share.
+ - This option is deprecated and will be removed in the later version.
+ aliases: ['share_pwd']
+ share_mnt:
+ type: str
+ description:
+      - (deprecated) Local mount path of the network share with read-write permission for ansible user.
+ This option is mandatory for Network Share.
+ - This option is deprecated and will be removed in the later version.
+ lockdown_mode:
+ required: True
+ type: str
+ description: Whether to Enable or Disable system lockdown mode.
+ choices: [Enabled, Disabled]
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author: "Felix Stephen (@felixs88)"
+notes:
+ - This module requires 'Administrator' privilege for I(idrac_user).
+ - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - This module does not support C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Check System Lockdown Mode
+ dellemc.openmanage.dellemc_system_lockdown_mode:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ lockdown_mode: "Disabled"
+"""
+
+RETURN = r'''
+---
+msg:
+ description: "Lockdown mode of the system is configured."
+ returned: always
+ type: str
+ sample: "Successfully completed the lockdown mode operations."
+system_lockdown_status:
+ type: dict
+ description: Storage configuration job and progress details from the iDRAC.
+ returned: success
+ sample:
+ {
+ "Data": {
+ "StatusCode": 200,
+ "body": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Successfully Completed Request",
+ "MessageArgs": [],
+ "MessageArgs@odata.count": 0,
+ "MessageId": "Base.1.0.Success",
+ "RelatedProperties": [],
+ "RelatedProperties@odata.count": 0,
+ "Resolution": "None",
+ "Severity": "OK"
+ }
+ ]
+ }
+ },
+ "Message": "none",
+ "Status": "Success",
+ "StatusCode": 200,
+ "retval": true
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import os
+import tempfile
+import json
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+try:
+ from omsdk.sdkfile import file_share_manager
+ from omsdk.sdkcreds import UserCredentials
+except ImportError:
+ pass
+
+
+# Configure the system lockdown mode
def run_system_lockdown_mode(idrac, module):
    """Enable or disable the iDRAC system lockdown mode.

    Keyword arguments:
    idrac -- iDRAC handle
    module -- Ansible module

    Returns a result dict with 'changed', 'failed', 'msg' and (on action)
    'system_lockdown_status'; fails the module on any error.
    """
    result = {'changed': False, 'failed': False,
              'msg': "Successfully completed the lockdown mode operations."}
    idrac.use_redfish = True
    # A local temp directory serves as the liason share for the config manager.
    tmp_dir = tempfile.gettempdir() + os.sep
    liason_share = file_share_manager.create_share_obj(share_path=tmp_dir, isFolder=True)
    if not liason_share.IsValid:
        module.fail_json(msg="Unable to access the share. Ensure that the share name, "
                             "share mount, and share credentials provided are correct.")
    liason_status = idrac.config_mgr.set_liason_share(liason_share)
    if liason_status['Status'] == "Failed":
        try:
            failure_message = liason_status['Data']['Message']
        except (IndexError, KeyError):
            failure_message = liason_status['Message']
        module.fail_json(msg=failure_message)

    mode = module.params['lockdown_mode']
    if mode == 'Enabled':
        result["system_lockdown_status"] = idrac.config_mgr.enable_system_lockdown()
    elif mode == 'Disabled':
        result["system_lockdown_status"] = idrac.config_mgr.disable_system_lockdown()

    lockdown_status = result.get("system_lockdown_status")
    if lockdown_status and "Status" in lockdown_status:
        if lockdown_status['Status'] == "Success":
            result['changed'] = True
        else:
            module.fail_json(msg="Failed to complete the lockdown mode operations.")
    return result
+
+
+# Main
def main():
    """Entry point: parse the options and run the lockdown mode operation."""
    argument_spec = dict(
        share_name=dict(required=False, type='str'),
        share_password=dict(required=False, type='str',
                            aliases=['share_pwd'], no_log=True),
        share_user=dict(required=False, type='str'),
        share_mnt=dict(required=False, type='str'),
        lockdown_mode=dict(required=True, choices=['Enabled', 'Disabled'])
    )
    argument_spec.update(idrac_auth_params)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False)

    try:
        with iDRACConnection(module.params) as idrac:
            result = run_system_lockdown_mode(idrac, module)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Unreachable hosts end the task without failing the whole play.
        module.exit_json(msg=str(err), unreachable=True)
    except (RuntimeError, ImportError, SSLValidationError, IOError, ValueError, TypeError, ConnectionError) as e:
        module.fail_json(msg=str(e))
    module.exit_json(msg=result["msg"],
                     system_lockdown_status=result["system_lockdown_status"],
                     changed=result["changed"])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py
new file mode 100644
index 000000000..c9c80854a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py
@@ -0,0 +1,524 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 6.0.0
+# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: idrac_attributes
+short_description: Configure the iDRAC attributes.
+version_added: "6.0.0"
+description:
+ - This module allows to configure the iDRAC attributes.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ idrac_attributes:
+ type: dict
+ description:
+ - "Dictionary of iDRAC attributes and value. The attributes should be
+ part of the Integrated Dell Remote Access Controller Attribute Registry.
+ To view the list of attributes in Attribute Registry for iDRAC9 and above,
+ see, U(https://I(idrac_ip)/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1)
+ and U(https://I(idrac_ip)/redfish/v1/Registries/ManagerAttributeRegistry)."
+ - "For iDRAC7 and iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
+ If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName>
+ (for Example, 'SNMP.1#AgentCommunity') then the equivalent attribute name for Redfish is
+ <GroupName>.<Instance>.<AttributeName> (for Example, 'SNMP.1.AgentCommunity')."
+ system_attributes:
+ type: dict
+ description:
+ - "Dictionary of System attributes and value. The attributes should be
+ part of the Integrated Dell Remote Access Controller Attribute Registry. To view the list of attributes in Attribute Registry for iDRAC9 and above,
+ see, U(https://I(idrac_ip)/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1)
+ and U(https://I(idrac_ip)/redfish/v1/Registries/ManagerAttributeRegistry)."
+ - "For iDRAC7 and iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
+ If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName>
+ (for Example, 'ThermalSettings.1#ThermalProfile') then the equivalent attribute name for Redfish is
+ <GroupName>.<Instance>.<AttributeName> (for Example, 'ThermalSettings.1.ThermalProfile')."
+ lifecycle_controller_attributes:
+ type: dict
+ description:
+ - "Dictionary of Lifecycle Controller attributes and value. The attributes should be
+        part of the Integrated Dell Remote Access Controller Attribute Registry. To view the list of attributes in Attribute Registry for iDRAC9 and above,
+ see, U(https://I(idrac_ip)/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1)
+ and U(https://I(idrac_ip)/redfish/v1/Registries/ManagerAttributeRegistry)."
+ - "For iDRAC7 and iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
+ If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName>
+ (for Example, 'LCAttributes.1#AutoUpdate') then the equivalent attribute name for Redfish is
+ <GroupName>.<Instance>.<AttributeName> (for Example, 'LCAttributes.1.AutoUpdate')."
+ resource_id:
+ type: str
+ description: Redfish ID of the resource.
+requirements:
+ - "python >= 3.8.6"
+author:
+ - Husniya Abdul Hameed (@husniya-hameed)
+ - Felix Stephen (@felixs88)
+notes:
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports C(check_mode).
+  - For iDRAC7 and iDRAC8 based servers, the values provided for the attributes are not validated.
+ Ensure appropriate values are passed.
+'''
+
+EXAMPLES = """
+---
+- name: Configure iDRAC attributes
+ dellemc.openmanage.idrac_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ idrac_attributes:
+ SNMP.1.AgentCommunity: public
+
+- name: Configure System attributes
+ dellemc.openmanage.idrac_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ system_attributes:
+ ThermalSettings.1.ThermalProfile: Sound Cap
+
+- name: Configure Lifecycle Controller attributes
+ dellemc.openmanage.idrac_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ lifecycle_controller_attributes:
+ LCAttributes.1.AutoUpdate: Enabled
+
+- name: Configure the iDRAC attributes for email alert settings.
+ dellemc.openmanage.idrac_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ idrac_attributes:
+ EmailAlert.1.CustomMsg: Display Message
+ EmailAlert.1.Enable: Enabled
+ EmailAlert.1.Address: test@test.com
+
+- name: Configure the iDRAC attributes for SNMP alert settings.
+ dellemc.openmanage.idrac_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ idrac_attributes:
+ SNMPAlert.1.Destination: 192.168.0.2
+ SNMPAlert.1.State: Enabled
+ SNMPAlert.1.SNMPv3Username: username
+
+- name: Configure the iDRAC attributes for SMTP alert settings.
+ dellemc.openmanage.idrac_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ idrac_attributes:
+ RemoteHosts.1.SMTPServerIPAddress: 192.168.0.3
+ RemoteHosts.1.SMTPAuthentication: Enabled
+ RemoteHosts.1.SMTPPort: 25
+ RemoteHosts.1.SMTPUserName: username
+ RemoteHosts.1.SMTPPassword: password
+
+- name: Configure the iDRAC attributes for webserver settings.
+ dellemc.openmanage.idrac_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ idrac_attributes:
+ WebServer.1.SSLEncryptionBitLength: 128-Bit or higher
+ WebServer.1.TLSProtocol: TLS 1.1 and Higher
+
+- name: Configure the iDRAC attributes for SNMP settings.
+ dellemc.openmanage.idrac_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ idrac_attributes:
+ SNMP.1.SNMPProtocol: All
+ SNMP.1.AgentEnable: Enabled
+ SNMP.1.TrapFormat: SNMPv1
+ SNMP.1.AlertPort: 162
+ SNMP.1.AgentCommunity: public
+
+- name: Configure the iDRAC LC attributes for collecting system inventory.
+ dellemc.openmanage.idrac_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ lifecycle_controller_attributes:
+ LCAttributes.1.CollectSystemInventoryOnRestart: Enabled
+
+- name: Configure the iDRAC system attributes for LCD configuration.
+ dellemc.openmanage.idrac_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ system_attributes:
+ LCD.1.Configuration: Service Tag
+ LCD.1.vConsoleIndication: Enabled
+ LCD.1.FrontPanelLocking: Full-Access
+ LCD.1.UserDefinedString: custom string
+
+- name: Configure the iDRAC attributes for Timezone settings.
+ dellemc.openmanage.idrac_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ idrac_attributes:
+ Time.1.TimeZone: CST6CDT
+ NTPConfigGroup.1.NTPEnable: Enabled
+ NTPConfigGroup.1.NTP1: 192.168.0.5
+ NTPConfigGroup.1.NTP2: 192.168.0.6
+ NTPConfigGroup.1.NTP3: 192.168.0.7
+
+- name: Configure all attributes
+ dellemc.openmanage.idrac_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ idrac_attributes:
+ SNMP.1.AgentCommunity: test
+ SNMP.1.AgentEnable: Enabled
+ SNMP.1.DiscoveryPort: 161
+ system_attributes:
+ ServerOS.1.HostName: demohostname
+ lifecycle_controller_attributes:
+ LCAttributes.1.AutoUpdate: Disabled
+"""
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Status of the attribute update operation.
+ returned: always
+ sample: "Successfully updated the attributes."
+invalid_attributes:
+ type: dict
+ description: Dict of invalid attributes provided.
+ returned: on invalid attributes or values.
+ sample: {
+ "LCAttributes.1.AutoUpdate": "Invalid value for Enumeration.",
+ "LCAttributes.1.StorageHealthRollupStatus": "Read only Attribute cannot be modified.",
+ "SNMP.1.AlertPort": "Not a valid integer.",
+ "SNMP.1.AlertPorty": "Attribute does not exist.",
+ "SysLog.1.PowerLogInterval": "Integer out of valid range.",
+ "ThermalSettings.1.AirExhaustTemp": "Invalid value for Enumeration."
+ }
+error_info:
+ description: Error information of the operation.
+ returned: when attribute value is invalid.
+ type: dict
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "The value 'false' for the property LCAttributes.1.BIOSRTDRequested is of a different type than the property can accept.",
+ "MessageArgs": [
+ "false",
+ "LCAttributes.1.BIOSRTDRequested"
+ ],
+ "MessageArgs@odata.count": 2,
+ "MessageId": "Base.1.12.PropertyValueTypeError",
+ "RelatedProperties": [
+ "#/Attributes/LCAttributes.1.BIOSRTDRequested"
+ ],
+ "RelatedProperties@odata.count": 1,
+ "Resolution": "Correct the value for the property in the request body and resubmit the request if the operation failed.",
+ "Severity": "Warning"
+ }
+ ],
+ "code": "Base.1.12.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information"
+ }
+ }
+'''
+
+import json
+import re
+from ssl import SSLError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import get_manager_res_id
+from ansible.module_utils.basic import AnsibleModule
+
+
+# Status messages returned by the module.
+SUCCESS_MSG = "Successfully updated the attributes."
+NO_CHANGES_MSG = "No changes found to be applied."
+CHANGES_MSG = "Changes found to be applied."
+ATTR_FAIL_MSG = "Application of some of the attributes failed due to invalid value or enumeration."
+# Fixed Redfish FQDDs for the embedded System, iDRAC and Lifecycle Controller resources.
+SYSTEM_ID = "System.Embedded.1"
+MANAGER_ID = "iDRAC.Embedded.1"
+LC_ID = "LifecycleController.Embedded.1"
+MANAGERS_URI = "/redfish/v1/Managers"
+ATTR = "Attributes"
+# Job tracking URI template; manager_id comes from resource_id/get_manager_res_id.
+JOB_URI = "/redfish/v1/Managers/{manager_id}/Jobs/{job_id}"
+
+
+def xml_data_conversion(attrbite, fqdd=None):
+    """Build an SCP XML <Component> fragment from an attribute dict.
+
+    A dot that immediately follows a digit in an attribute name is replaced
+    by '#' (SCP key form, e.g. 'SNMP.1.AgentEnable' -> 'SNMP.1#AgentEnable').
+
+    :param attrbite: dict of attribute name -> value.
+        NOTE(review): 'attrbite' looks like a typo for 'attribute'; kept
+        as-is to avoid changing the signature.
+    :param fqdd: component FQDD placed in the XML element.
+    :return: tuple (xml_component_string, {converted_key: str(value)}).
+    """
+    component = """<Component FQDD="{0}">{1}</Component>"""
+    attr = ""
+    json_data = {}
+    for k, v in attrbite.items():
+        key = re.sub(r"(?<=\d)\.", "#", k)
+        attr += '<Attribute Name="{0}">{1}</Attribute>'.format(key, v)
+        json_data[key] = str(v)
+    root = component.format(fqdd, attr)
+    return root, json_data
+
+
+def validate_attr_name(attribute, req_data):
+    """Match requested attribute names against an exported SCP component.
+
+    :param attribute: list of {'Name': ..., 'Value': ...} dicts from the SCP export.
+    :param req_data: requested attributes in SCP key form ('#' separators).
+    :return: tuple (dict of existing name -> current value for the requested
+        keys, dict of invalid-attribute messages keyed by the user-facing
+        dotted attribute name).
+    """
+    invalid_attr = {}
+    data_dict = {attr["Name"]: attr["Value"] for attr in attribute if attr["Name"] in req_data.keys()}
+    if not len(data_dict) == len(req_data):
+        # Any requested key absent from the export does not exist on the device.
+        for key in req_data.keys():
+            if key not in data_dict:
+                act_key = key.replace("#", ".")  # report in dotted form
+                invalid_attr[act_key] = "Attribute does not exist."
+    return data_dict, invalid_attr
+
+
+def get_check_mode(module, idrac, idrac_json, sys_json, lc_json):
+    """Compare requested attributes to an exported SCP and handle check mode.
+
+    Exports the current Server Configuration Profile (JSON) for iDRAC, System
+    and LifecycleController, fails the module when a requested attribute does
+    not exist, and exits early in check mode or when nothing would change.
+
+    :param module: AnsibleModule instance (used for exit_json/fail_json).
+    :param idrac: iDRACRedfishAPI session.
+    :param idrac_json: requested iDRAC attributes in SCP key form (may be {}).
+    :param sys_json: requested System attributes in SCP key form (may be {}).
+    :param lc_json: requested LC attributes in SCP key form (may be {}).
+    """
+    scp_response = idrac.export_scp(export_format="JSON", export_use="Default",
+                                    target="iDRAC,System,LifecycleController", job_wait=True)
+    comp = scp_response.json_data["SystemConfiguration"]["Components"]
+    exist_idrac, exist_sys, exist_lc, invalid = {}, {}, {}, {}
+    for cmp in comp:
+        if idrac_json and cmp.get("FQDD") == MANAGER_ID:
+            exist_idrac, invalid_attr = validate_attr_name(cmp["Attributes"], idrac_json)
+            if invalid_attr:
+                invalid.update(invalid_attr)
+        if sys_json and cmp.get("FQDD") == SYSTEM_ID:
+            exist_sys, invalid_attr = validate_attr_name(cmp["Attributes"], sys_json)
+            if invalid_attr:
+                invalid.update(invalid_attr)
+        if lc_json and cmp.get("FQDD") == LC_ID:
+            exist_lc, invalid_attr = validate_attr_name(cmp["Attributes"], lc_json)
+            if invalid_attr:
+                invalid.update(invalid_attr)
+    if invalid:
+        module.fail_json(msg="Attributes have invalid values.", invalid_attributes=invalid)
+    # Symmetric difference is non-empty when any requested value differs
+    # from (or is missing among) the current values.
+    diff_change = [bool(set(exist_idrac.items()) ^ set(idrac_json.items())) or
+                   bool(set(exist_sys.items()) ^ set(sys_json.items())) or
+                   bool(set(exist_lc.items()) ^ set(lc_json.items()))]
+    if module.check_mode and any(diff_change) is True:
+        module.exit_json(msg=CHANGES_MSG, changed=True)
+    elif (module.check_mode and all(diff_change) is False) or \
+            (not module.check_mode and all(diff_change) is False):
+        module.exit_json(msg=NO_CHANGES_MSG)
+
+
+def scp_idrac_attributes(module, idrac, res_id):
+    """Apply the requested attributes through an SCP import job.
+
+    Builds one SCP XML payload covering whichever of the iDRAC/System/LC
+    attribute sets were supplied, runs the check-mode / no-change
+    evaluation, then imports the profile and tracks the resulting job.
+
+    :param res_id: manager resource id used to build the job URI.
+    :return: job response from wait_for_job_completion.
+    """
+    job_wait = module.params.get("job_wait", True)
+    idrac_attr = module.params.get("idrac_attributes")
+    system_attr = module.params.get("system_attributes")
+    lc_attr = module.params.get("lifecycle_controller_attributes")
+    root = """<SystemConfiguration>{0}</SystemConfiguration>"""
+    component = ""
+    idrac_json_data, system_json_data, lc_json_data = {}, {}, {}
+    if idrac_attr is not None:
+        idrac_xml_payload, idrac_json_data = xml_data_conversion(idrac_attr, fqdd=MANAGER_ID)
+        component += idrac_xml_payload
+    if system_attr is not None:
+        system_xml_payload, system_json_data = xml_data_conversion(system_attr, fqdd=SYSTEM_ID)
+        component += system_xml_payload
+    if lc_attr is not None:
+        lc_xml_payload, lc_json_data = xml_data_conversion(lc_attr, fqdd=LC_ID)
+        component += lc_xml_payload
+    # May exit the module here (check mode / no changes / invalid names).
+    get_check_mode(module, idrac, idrac_json_data, system_json_data, lc_json_data,)
+    payload = root.format(component)
+    # Import asynchronously, then track the job id taken from the Location header.
+    resp = idrac.import_scp(import_buffer=payload, target="ALL", job_wait=False)
+    job_id = resp.headers["Location"].split("/")[-1]
+    job_uri = JOB_URI.format(manager_id=res_id, job_id=job_id)
+    job_resp = idrac.wait_for_job_completion(job_uri, job_wait=job_wait)
+    return job_resp
+
+
+def get_response_attr(idrac, idrac_id, attr, uri_dict):
+    """Collect the requested attributes whose current value differs.
+
+    :param idrac_id: FQDD key into uri_dict (DellAttributes resource URI).
+    :param attr: requested attribute name -> value dict.
+    :return: tuple (count of differing attributes, {name: requested_value}).
+    """
+    response_attr = {}
+    diff = 0
+    response = idrac.invoke_request(uri_dict.get(idrac_id), "GET")
+    for k in attr.keys():
+        if response.json_data[ATTR].get(k) != attr.get(k):
+            # response_attr[k] = response.json_data[ATTR].get(k)
+            response_attr[k] = attr.get(k)  # keep the requested value for later validation
+            diff += 1
+    return diff, response_attr
+
+
+def get_attributes_registry(idrac):
+    """Fetch the Dell ManagerAttributeRegistry keyed by AttributeName.
+
+    Follows the last 'Location' entry of the registry resource to the JSON
+    registry file. Returns an empty dict on any failure so that registry
+    validation degrades gracefully instead of failing the module.
+    """
+    reggy = {}
+    try:
+        resp = idrac.invoke_request("/redfish/v1/Registries/ManagerAttributeRegistry", "GET")
+        loc_list = resp.json_data.get("Location", [])
+        if loc_list:
+            reg_json_uri = loc_list[-1].get("Uri")
+            reg_resp = idrac.invoke_request(reg_json_uri, "GET")
+            attr_list = reg_resp.json_data.get("RegistryEntries").get("Attributes")
+            reggy = dict((x["AttributeName"], x) for x in attr_list)
+    except Exception:
+        # Best effort: registry validation is optional.
+        reggy = {}
+    return reggy
+
+
+def validate_vs_registry(registry, attr_dict):
+    """Validate requested attribute values against the attribute registry.
+
+    Checks existence, the Readonly flag, enumeration membership and integer
+    bounds (LowerBound/UpperBound).
+
+    :param registry: {AttributeName: registry entry} from get_attributes_registry.
+    :param attr_dict: requested attribute name -> value dict.
+    :return: {attribute_name: reason} for every invalid entry; empty if all valid.
+    """
+    invalid = {}
+    for k, v in attr_dict.items():
+        if k in registry:
+            val_dict = registry.get(k)
+            if val_dict.get("Readonly"):
+                invalid[k] = "Read only Attribute cannot be modified."
+            else:
+                type = val_dict.get("Type")  # NOTE(review): shadows the builtin 'type'
+                if type == "Enumeration":
+                    # Accept only values listed in the registry enumeration.
+                    found = False
+                    for val in val_dict.get("Value", []):
+                        if v == val.get("ValueDisplayName"):
+                            found = True
+                            break
+                    if not found:
+                        invalid[k] = "Invalid value for Enumeration."
+                if type == "Integer":
+                    try:
+                        i = int(v)
+                    except Exception:
+                        invalid[k] = "Not a valid integer."
+                    else:
+                        if not (val_dict.get("LowerBound") <= i <= val_dict.get("UpperBound")):
+                            invalid[k] = "Integer out of valid range."
+        else:
+            invalid[k] = "Attribute does not exist."
+    return invalid
+
+
+def fetch_idrac_uri_attr(idrac, module, res_id):
+    """Resolve DellAttributes URIs, diff the requested values and validate them.
+
+    When the manager resource exposes Links.Oem.Dell.DellAttributes, the
+    requested attributes are diffed against the live values and validated
+    against the attribute registry; invalid entries exit the module as
+    failed. When DellAttributes is not available, the SCP import path is
+    used instead and the module exits from within that branch.
+
+    :return: tuple (diff count, {fqdd: uri}, differing iDRAC attrs,
+        differing System attrs, differing LC attrs).
+    """
+    diff = 0
+    uri_dict = {}
+    idrac_response_attr = {}
+    system_response_attr = {}
+    lc_response_attr = {}
+    response = idrac.invoke_request("{0}/{1}".format(MANAGERS_URI, res_id), "GET")
+    dell_attributes = response.json_data.get('Links', {}).get('Oem', {}).get('Dell', {}).get('DellAttributes')
+    if dell_attributes:
+        # Map each DellAttributes resource (iDRAC/System/LC) FQDD to its URI.
+        for item in dell_attributes:
+            uri = item.get('@odata.id')
+            attr_id = uri.split("/")[-1]
+            uri_dict[attr_id] = uri
+        idrac_attr = module.params.get("idrac_attributes")
+        system_attr = module.params.get("system_attributes")
+        lc_attr = module.params.get("lifecycle_controller_attributes")
+        invalid = {}
+        attr_registry = get_attributes_registry(idrac)
+        if idrac_attr is not None:
+            x, idrac_response_attr = get_response_attr(idrac, MANAGER_ID, idrac_attr, uri_dict)
+            invalid.update(validate_vs_registry(attr_registry, idrac_response_attr))
+            diff += x
+        if system_attr is not None:
+            x, system_response_attr = get_response_attr(idrac, SYSTEM_ID, system_attr, uri_dict)
+            invalid.update(validate_vs_registry(attr_registry, system_response_attr))
+            diff += x
+        if lc_attr is not None:
+            x, lc_response_attr = get_response_attr(idrac, LC_ID, lc_attr, uri_dict)
+            invalid.update(validate_vs_registry(attr_registry, lc_response_attr))
+            diff += x
+        if invalid:
+            module.exit_json(failed=True, msg="Attributes have invalid values.", invalid_attributes=invalid)
+    else:
+        # No DellAttributes support: fall back to the SCP import workflow.
+        job_resp = scp_idrac_attributes(module, idrac, res_id)
+        if job_resp.status_code == 200:
+            error_msg = ["Unable to complete application of configuration profile values.",
+                         "Import of Server Configuration Profile operation completed with errors."]
+            message = job_resp.json_data["Message"]
+            message_id = job_resp.json_data["MessageId"]
+            if message_id == "SYS069":
+                module.exit_json(msg=NO_CHANGES_MSG)
+            elif message_id == "SYS053":
+                module.exit_json(msg=SUCCESS_MSG, changed=True)
+            elif message in error_msg:
+                module.fail_json(msg=ATTR_FAIL_MSG)
+            else:
+                module.fail_json(msg=message)
+    return diff, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr
+
+
+def process_check_mode(module, diff):
+    """Exit early when nothing would change, or when running in check mode.
+
+    :param diff: number of attributes whose value would change.
+    """
+    if not diff:
+        module.exit_json(msg=NO_CHANGES_MSG)
+    elif diff and module.check_mode:
+        module.exit_json(msg=CHANGES_MSG, changed=True)
+
+
+def update_idrac_attributes(idrac, module, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr):
+    """PATCH the requested attributes to the respective DellAttributes URIs.
+
+    The full requested payload is sent for each component, not only the
+    differing subset computed earlier.
+
+    :return: dict of PATCH response bodies keyed by component name.
+    """
+    resp = {}
+    idrac_payload = module.params.get("idrac_attributes")
+    system_payload = module.params.get("system_attributes")
+    lc_payload = module.params.get("lifecycle_controller_attributes")
+    if idrac_payload is not None and idrac_response_attr is not None:
+        idrac_response = idrac.invoke_request(uri_dict.get(MANAGER_ID), "PATCH", data={ATTR: idrac_payload})
+        resp["iDRAC"] = idrac_response.json_data
+    if system_payload is not None and system_response_attr is not None:
+        system_response = idrac.invoke_request(uri_dict.get(SYSTEM_ID), "PATCH", data={ATTR: system_payload})
+        resp["System"] = system_response.json_data
+    if lc_payload is not None and lc_response_attr is not None:
+        lc_response = idrac.invoke_request(uri_dict.get(LC_ID), "PATCH", data={ATTR: lc_payload})
+        resp["Lifecycle Controller"] = lc_response.json_data
+    return resp
+
+
+def main():
+    """Module entry point: build the argument spec and drive the update flow."""
+    specs = {
+        "idrac_attributes": {"required": False, "type": 'dict'},
+        "system_attributes": {"required": False, "type": 'dict'},
+        "lifecycle_controller_attributes": {"required": False, "type": 'dict'},
+        "resource_id": {"required": False, "type": 'str'}
+    }
+    specs.update(idrac_auth_params)
+    module = AnsibleModule(
+        argument_spec=specs,
+        required_one_of=[('idrac_attributes', 'system_attributes', 'lifecycle_controller_attributes')],
+        supports_check_mode=True
+    )
+    try:
+        with iDRACRedfishAPI(module.params, req_session=True) as idrac:
+            res_id = module.params.get('resource_id')
+            if not res_id:
+                res_id = get_manager_res_id(idrac)
+            diff, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr = fetch_idrac_uri_attr(idrac, module, res_id)
+            process_check_mode(module, diff)
+            resp = update_idrac_attributes(idrac, module, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr)
+            module.exit_json(msg=SUCCESS_MSG, changed=True)
+    except HTTPError as err:
+        # HTTPError is file-like, so its JSON error body can be parsed.
+        module.fail_json(msg=str(err), error_info=json.load(err))
+    except URLError as err:
+        module.exit_json(msg=str(err), unreachable=True)
+    except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
+        # NOTE(review): json.load(err) is only valid for file-like objects;
+        # for these exception types it presumably raises — confirm intent.
+        module.fail_json(msg=str(err), error_info=json.load(err))
+
+
+# Run the module when executed directly by Ansible.
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py
new file mode 100644
index 000000000..8cd9c5e7b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py
@@ -0,0 +1,820 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 6.2.0
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_bios
+short_description: Modify and clear BIOS attributes, reset BIOS settings and configure boot sources
+version_added: "2.1.0"
+description:
+ - This module allows to modify the BIOS attributes. Also clears pending BIOS attributes and resets BIOS to default settings.
+ - Boot sources can be enabled or disabled. Boot sequence can be configured.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ share_name:
+ type: str
+ description: (deprecated)Network share or a local path.
+ share_user:
+ type: str
+ description: "(deprecated)Network share user name. Use the format 'user@domain' or domain//user if user
+ is part of a domain. This option is mandatory for CIFS share."
+ share_password:
+ type: str
+ description: (deprecated)Network share user password. This option is mandatory for CIFS share.
+ aliases: ['share_pwd']
+ share_mnt:
+ type: str
+ description: "(deprecated)Local mount path of the network share with read-write permission for ansible user.
+ This option is mandatory for network shares."
+ apply_time:
+ type: str
+ description:
+ - Apply time of the I(attributes).
+ - This is applicable only to I(attributes).
+ - "C(Immediate) Allows the user to immediately reboot the host and apply the changes. I(job_wait)
+ is applicable."
+ - C(OnReset) Allows the user to apply the changes on the next reboot of the host server.
+ - "C(AtMaintenanceWindowStart) Allows the user to apply at the start of a maintenance window as specified
+ in I(maintenance_window). A reboot job will be scheduled."
+ - "C(InMaintenanceWindowOnReset) Allows to apply after a manual reset but within the maintenance window as
+ specified in I(maintenance_window)."
+ choices: [Immediate, OnReset, AtMaintenanceWindowStart, InMaintenanceWindowOnReset]
+ default: Immediate
+ maintenance_window:
+ type: dict
+ description:
+ - Option to schedule the maintenance window.
+ - This is required when I(apply_time) is C(AtMaintenanceWindowStart) or C(InMaintenanceWindowOnReset).
+ suboptions:
+ start_time:
+ type: str
+ description:
+ - The start time for the maintenance window to be scheduled.
+ - "The format is YYYY-MM-DDThh:mm:ss<offset>"
+ - "<offset> is the time offset from UTC that the current timezone set in
+ iDRAC in the format: +05:30 for IST."
+ required: True
+ duration:
+ type: int
+ description:
+ - The duration in seconds for the maintenance window.
+ required: True
+ attributes:
+ type: dict
+ description:
+ - "Dictionary of BIOS attributes and value pair. Attributes should be
+ part of the Redfish Dell BIOS Attribute Registry. Use
+ U(https://I(idrac_ip)/redfish/v1/Systems/System.Embedded.1/Bios) to view the Redfish URI."
+ - This is mutually exclusive with I(boot_sources), I(clear_pending), and I(reset_bios).
+ boot_sources:
+ type: list
+ elements: raw
+ description:
+ - (deprecated)List of boot devices to set the boot sources settings.
+ - I(boot_sources) is mutually exclusive with I(attributes), I(clear_pending), and I(reset_bios).
+ - I(job_wait) is not applicable. The module waits till the completion of this task.
+ - This feature is deprecated, please use M(dellemc.openmanage.idrac_boot) for configuring boot sources.
+ clear_pending:
+ type: bool
+ description:
+ - Allows the user to clear all pending BIOS attributes changes.
+ - C(true) will discard any pending changes to bios attributes or remove job if in scheduled state.
+ - This operation will not create any job.
+ - C(false) will not perform any operation.
+ - This is mutually exclusive with I(boot_sources), I(attributes), and I(reset_bios).
+ - C(Note) Any BIOS job scheduled due to boot sources configuration will not be cleared.
+ reset_bios:
+ type: bool
+ description:
+ - Resets the BIOS to default settings and triggers a reboot of host system.
+ - This is applied to the host after the restart.
+ - This operation will not create any job.
+ - C(false) will not perform any operation.
+ - This is mutually exclusive with I(boot_sources), I(attributes), and I(clear_pending).
+ - When C(true), this action will always report as changes found to be applicable.
+ reset_type:
+ type: str
+ description:
+ - C(force_restart) Forcefully reboot the host system.
+ - C(graceful_restart) Gracefully reboot the host system.
+ - This is applicable for I(reset_bios), and I(attributes) when I(apply_time) is C(Immediate).
+ choices: [graceful_restart, force_restart]
+ default: graceful_restart
+ job_wait:
+ type: bool
+ description:
+ - Provides the option to wait for job completion.
+ - This is applicable for I(attributes) when I(apply_time) is C(Immediate).
+ default: true
+ job_wait_timeout:
+ type: int
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(True).
+ default: 1200
+requirements:
+ - "omsdk >= 1.2.490"
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+ - "Anooja Vardhineni (@anooja-vardhineni)"
+ - "Jagadeesh N V (@jagadeeshnv)"
+notes:
+ - omsdk is required to be installed only for I(boot_sources) operation.
+ - This module requires 'Administrator' privilege for I(idrac_user).
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Configure generic attributes of the BIOS
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ attributes:
+ BootMode : "Bios"
+ OneTimeBootMode: "Enabled"
+ BootSeqRetry: "Enabled"
+
+- name: Configure PXE generic attributes
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ attributes:
+ PxeDev1EnDis: "Enabled"
+ PxeDev1Protocol: "IPV4"
+ PxeDev1VlanEnDis: "Enabled"
+ PxeDev1VlanId: 1
+ PxeDev1Interface: "NIC.Embedded.1-1-1"
+ PxeDev1VlanPriority: 2
+
+- name: Configure BIOS attributes at Maintenance window
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+ attributes:
+ BootMode : "Bios"
+ OneTimeBootMode: "Enabled"
+ BootSeqRetry: "Enabled"
+
+- name: Clear pending BIOS attributes
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ clear_pending: yes
+
+- name: Reset BIOS attributes to default settings.
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_pwd }}"
+ validate_certs: False
+ reset_bios: yes
+
+- name: Configure boot sources
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_sources:
+ - Name : "NIC.Integrated.1-2-3"
+ Enabled : true
+ Index : 0
+
+- name: Configure multiple boot sources
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_sources:
+ - Name : "NIC.Integrated.1-1-1"
+ Enabled : true
+ Index : 0
+ - Name : "NIC.Integrated.2-2-2"
+ Enabled : true
+ Index : 1
+ - Name : "NIC.Integrated.3-3-3"
+ Enabled : true
+ Index : 2
+
+- name: Configure boot sources - Enabling
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_sources:
+ - Name : "NIC.Integrated.1-1-1"
+ Enabled : true
+
+- name: Configure boot sources - Index
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_sources:
+ - Name : "NIC.Integrated.1-1-1"
+ Index : 0
+"""
+
+RETURN = """
+---
+status_msg:
+ description: Overall status of the bios operation.
+ returned: success
+ type: str
+ sample: Successfully cleared pending BIOS attributes.
+msg:
+ description: Status of the job for I(boot_sources) or status of the action performed on bios.
+ returned: success
+ type: dict
+ sample: {
+ "CompletionTime": "2020-04-20T18:50:20",
+ "Description": "Job Instance",
+ "EndTime": null,
+ "Id": "JID_873888162305",
+ "JobState": "Completed",
+ "JobType": "ImportConfiguration",
+ "Message": "Successfully imported and applied Server Configuration Profile.",
+ "MessageArgs": [],
+ "MessageId": "SYS053",
+ "Name": "Import Configuration",
+ "PercentComplete": 100,
+ "StartTime": "TIME_NOW",
+ "Status": "Success",
+ "TargetSettingsURI": null,
+ "retval": true
+ }
+invalid_attributes:
+ type: dict
+ description: Dict of invalid attributes provided.
+ returned: on invalid attributes or values.
+ sample: {
+ "PxeDev1VlanId": "Not a valid integer.",
+ "AcPwrRcvryUserDelay": "Integer out of valid range.",
+ "BootSeqRetry": "Invalid value for Enumeration.",
+ "Proc1Brand": "Read only Attribute cannot be modified.",
+ "AssetTag": "Attribute does not exist."
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+"""
+
+# Redfish URIs for the embedded system, manager and BIOS resources.
+SYSTEM_URI = "/redfish/v1/Systems/System.Embedded.1"
+MANAGER_URI = "/redfish/v1/Managers/iDRAC.Embedded.1"
+BIOS_URI = "/redfish/v1/Systems/System.Embedded.1/Bios"
+BIOS_REGISTRY = "/redfish/v1/Systems/System.Embedded.1/Bios/BiosRegistry"
+CLEAR_PENDING_URI = "/redfish/v1/Systems/System.Embedded.1/Bios/Settings/Actions/Oem/DellManager.ClearPending"
+RESET_BIOS_DEFAULT = "/redfish/v1/Systems/System.Embedded.1/Bios/Actions/Bios.ResetBios"
+BIOS_SETTINGS = "/redfish/v1/Systems/System.Embedded.1/Bios/Settings"
+POWER_HOST_URI = "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset"
+IDRAC_JOBS_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs"
+iDRAC_JOBS_EXP = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs?$expand=*($levels=1)"
+iDRAC_JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{job_id}"
+LOG_SERVICE_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/LogServices/Lclog"
+iDRAC9_LC_LOG = "/redfish/v1/Managers/iDRAC.Embedded.1/LogServices/Lclog/Entries"
+iDRAC8_LC_LOG = "/redfish/v1/Managers/iDRAC.Embedded.1/Logs/Lclog"
+# LC log query filters by MessageId; used by track_log_entry after a BIOS reset.
+LC_LOG_FILTER = "?$filter=MessageId%20eq%20'UEFI0157'"
+CPU_RST_FILTER = "?$filter=MessageId%20eq%20'SYS1003'"
+# User-facing status messages.
+BIOS_JOB_RUNNING = "BIOS Config job is running. Wait for the job to complete."
+NO_CHANGES_MSG = "No changes found to be applied."
+CHANGES_MSG = "Changes found to be applied."
+SUCCESS_CLEAR = "Successfully cleared the pending BIOS attributes."
+SUCCESS_COMPLETE = "Successfully applied the BIOS attributes update."
+SCHEDULED_SUCCESS = "Successfully scheduled the job for the BIOS attributes update."
+COMMITTED_SUCCESS = "Successfully committed changes. The job is in pending state. The changes will be applied {0}"
+# NOTE(review): 'RESET_TRIGGERRED' is misspelled; renaming would require
+# touching its usages, so it is left as-is. Distinct from BIOS_RESET_TRIGGERED.
+RESET_TRIGGERRED = "Reset BIOS action triggered successfully."
+HOST_RESTART_FAILED = "Unable to restart the host. Check the host status and restart the host manually."
+BIOS_RESET_TRIGGERED = "The BIOS reset action has been triggered successfully. The host reboot is complete."
+BIOS_RESET_COMPLETE = "BIOS reset to defaults has been completed successfully."
+BIOS_RESET_PENDING = "Pending attributes to be applied. " \
+                     "Clear or apply the pending changes before resetting the BIOS."
+FORCE_BIOS_DELETE = "The BIOS configuration job is scheduled. Use 'force' to delete the job."
+INVALID_ATTRIBUTES_MSG = "The values specified for the attributes are invalid."
+UNSUPPORTED_APPLY_TIME = "Apply time {0} is not supported."
+MAINTENANCE_OFFSET = "The maintenance time must be post-fixed with local offset to {0}."
+MAINTENANCE_TIME = "The specified maintenance time window occurs in the past, " \
+                   "provide a future time to schedule the maintenance window."
+# Power-state polling: 30 retries at 10 s intervals (~300 s window; see reset_host).
+POWER_CHECK_RETRIES = 30
+POWER_CHECK_INTERVAL = 10
+
+import json
+import time
+from ansible.module_utils.common.dict_transformations import recursive_diff
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import idrac_redfish_job_tracking, \
+ strip_substr_dict
+
+
+def run_server_bios_config(idrac, module):
+    """Configure boot sources through the OMSDK config manager (deprecated path).
+
+    :return: result dict from configure_boot_sources.
+    """
+    msg = {}
+    idrac.use_redfish = True
+    # NOTE(review): the validation message returned by _validate_params is
+    # not checked here — confirm whether failures should abort the call.
+    _validate_params(module.params['boot_sources'])
+    if module.check_mode:
+        # Evaluate whether a change would occur without applying it.
+        idrac.config_mgr.is_change_applicable()
+    msg = idrac.config_mgr.configure_boot_sources(input_boot_devices=module.params['boot_sources'])
+    return msg
+
+
+def _validate_params(params):
+    """
+    Validate list of dict params.
+
+    :param params: Ansible list of dict
+    :return: error message string; empty string when valid.
+    """
+    fields = [
+        {"name": "Name", "type": str, "required": True},
+        {"name": "Index", "type": int, "required": False, "min": 0},
+        {"name": "Enabled", "type": bool, "required": False}
+    ]
+    default = ['Name', 'Index', 'Enabled']
+    # NOTE(review): every branch below returns during the first iteration, so
+    # only the first entry is validated and the duplicate-name check is only
+    # reached when the list is empty — confirm this is intentional.
+    for attr in params:
+        if not isinstance(attr, dict):
+            msg = "attribute values must be of type: dict. {0} ({1}) provided.".format(attr, type(attr))
+            return msg
+        elif all(k in default for k in attr.keys()):
+            msg = check_params(attr, fields)
+            return msg
+        else:
+            msg = "attribute keys must be one of the {0}.".format(default)
+            return msg
+    msg = _validate_name_index_duplication(params)
+    return msg
+
+
+def _validate_name_index_duplication(params):
+    """
+    Validate for duplicate names and indices.
+
+    :param params: Ansible list of dict
+    :return: error message string; empty string when no duplicates.
+    """
+    # NOTE(review): despite the docstring, only duplicate 'Name' values are
+    # checked here; indices are not compared.
+    msg = ""
+    for i in range(len(params) - 1):
+        for j in range(i + 1, len(params)):
+            if params[i]['Name'] == params[j]['Name']:
+                msg = "duplicate name {0}".format(params[i]['Name'])
+                return msg
+    return msg
+
+
+def check_params(each, fields):
+    """
+    Each dictionary parameters validation as per the rule defined in fields.
+
+    :param each: validating each dictionary
+    :param fields: list of dictionary which has the set of rules.
+    :return: error message string; empty string when the entry is valid.
+    """
+    msg = ""
+    for f in fields:
+        # Optional field not supplied: nothing to validate.
+        if f['name'] not in each and f["required"] is False:
+            continue
+        if not f["name"] in each and f["required"] is True:
+            msg = "{0} is required and must be of type: {1}".format(f['name'], f['type'])
+        elif not isinstance(each[f["name"]], f["type"]):
+            msg = "{0} must be of type: {1}. {2} ({3}) provided.".format(
+                f['name'], f['type'], each[f['name']], type(each[f['name']]))
+        elif f['name'] in each and isinstance(each[f['name']], int) and 'min' in f:
+            if each[f['name']] < f['min']:
+                msg = "{0} must be greater than or equal to: {1}".format(f['name'], f['min'])
+    return msg
+
+
+def check_scheduled_bios_job(redfish_obj):
+    """Find the first active BIOSConfiguration job.
+
+    :return: tuple (job_id, job_state); (None, 'Unknown') when no job is in
+        the Scheduled/Running/Starting state.
+    """
+    job_resp = redfish_obj.invoke_request(iDRAC_JOBS_EXP, "GET")
+    job_list = job_resp.json_data.get('Members', [])
+    sch_jb = None
+    jb_state = 'Unknown'
+    for jb in job_list:
+        if jb.get("JobType") == "BIOSConfiguration" and jb.get("JobState") in ["Scheduled", "Running", "Starting"]:
+            sch_jb = jb['Id']
+            jb_state = jb.get("JobState")
+            break
+    return sch_jb, jb_state
+
+
+def delete_scheduled_bios_job(redfish_obj, job_id):
+    """DELETE the iDRAC job with the given id and return the response."""
+    resp = redfish_obj.invoke_request(iDRAC_JOB_URI.format(job_id=job_id), "DELETE")
+    return resp
+
+
+def get_pending_attributes(redfish_obj):
+    """Return the pending BIOS attributes; empty dict on any request failure.
+
+    NOTE(review): returns None (still falsy) when the response lacks the
+    'Attributes' key — callers only test truthiness.
+    """
+    try:
+        resp = redfish_obj.invoke_request(BIOS_SETTINGS, "GET")
+        attr = resp.json_data.get("Attributes")
+    except Exception:
+        attr = {}
+    return attr
+
+
+def get_power_state(redfish_obj):
+    """Return the host PowerState, retrying up to 3 times; 'Unknown' on failure."""
+    retries = 3
+    pstate = "Unknown"
+    while retries > 0:
+        try:
+            resp = redfish_obj.invoke_request(SYSTEM_URI, "GET")
+            pstate = resp.json_data.get("PowerState")
+            break
+        except Exception:
+            retries = retries - 1
+    return pstate
+
+
+def power_act_host(redfish_obj, p_state):
+    """POST a ComputerSystem.Reset action with the given ResetType.
+
+    :return: True on success, False when the request fails with HTTPError.
+    """
+    try:
+        redfish_obj.invoke_request(POWER_HOST_URI, "POST", data={'ResetType': p_state})
+        p_act = True
+    except HTTPError:
+        p_act = False
+    return p_act
+
+
+def track_power_state(redfish_obj, desired_state, retries=POWER_CHECK_RETRIES, interval=POWER_CHECK_INTERVAL):
+    """Poll the host power state until it reaches one of desired_state.
+
+    :param desired_state: list of acceptable PowerState values.
+    :return: True when reached within retries * interval seconds, else False.
+    """
+    count = retries
+    while count:
+        ps = get_power_state(redfish_obj)
+        if ps in desired_state:
+            achieved = True
+            break
+        else:
+            time.sleep(interval)
+            count = count - 1
+    else:
+        # while/else: loop exhausted without reaching the desired state.
+        achieved = False
+    return achieved
+
+
+def reset_host(module, redfish_obj):
+    """Power cycle the host: graceful (or forced) off, then power back on.
+
+    Exits the module as failed when either power action is rejected.
+
+    :return: True when the host reached 'On' within the tracking window.
+    """
+    reset_type = module.params.get('reset_type')
+    p_state = 'On'
+    ps = get_power_state(redfish_obj)
+    on_state = ["On"]
+    if ps in on_state:
+        p_state = 'GracefulShutdown'
+        if 'force' in reset_type:
+            p_state = 'ForceOff'
+    p_act = power_act_host(redfish_obj, p_state)
+    if not p_act:
+        module.exit_json(failed=True, status_msg=HOST_RESTART_FAILED)
+    state_achieved = track_power_state(redfish_obj, ["Off"])  # 30x10= 300 secs
+    p_state = "On"
+    if not state_achieved:
+        # Shutdown did not complete in time; escalate to a forced restart.
+        time.sleep(10)
+        p_state = "ForceRestart"
+    p_act = power_act_host(redfish_obj, p_state)
+    if not p_act:
+        module.exit_json(failed=True, status_msg=HOST_RESTART_FAILED)
+    state_achieved = track_power_state(redfish_obj, on_state)  # 30x10= 300 secs
+    return state_achieved
+
+
+def get_current_time(redfish_obj):
+    """Return (DateTime, DateTimeLocalOffset) of the manager, or (None, None) on failure."""
+    try:
+        resp = redfish_obj.invoke_request(MANAGER_URI, "GET")
+        curr_time = resp.json_data.get("DateTime")
+        date_offset = resp.json_data.get("DateTimeLocalOffset")
+    except Exception:
+        return None, None
+    return curr_time, date_offset
+
+
+def track_log_entry(redfish_obj):
+    """Poll the LC log for a new entry confirming the BIOS reset.
+
+    Polls for up to ~360 seconds at 15-second intervals for a log entry
+    created after the reset was triggered, using the UEFI0157 filter first
+    and alternating with the SYS1003 filter during the final third of the
+    retries. Returns BIOS_RESET_COMPLETE on a match; on timeout or any
+    request failure falls back to BIOS_RESET_TRIGGERED.
+    """
+    msg = None
+    filter_list = [LC_LOG_FILTER, CPU_RST_FILTER]
+    intrvl = 15
+    retries = 360 // intrvl
+    time.sleep(intrvl)
+    try:
+        resp = redfish_obj.invoke_request(LOG_SERVICE_URI, "GET")
+        uri = resp.json_data.get('Entries').get('@odata.id')
+        fltr_uris = []
+        for fltr in filter_list:
+            fltr_uris.append("{0}{1}".format(uri, fltr))
+        flen = len(fltr_uris)
+        fln = 1
+        pvt = retries // 3  # check for the SYS1003 after 2/3rds of retries
+        curr_time = resp.json_data.get('DateTime')
+        while retries:
+            # retries % fln: with fln == 1 only the first filter is polled;
+            # once fln == flen the two filters alternate.
+            resp = redfish_obj.invoke_request(fltr_uris[retries % fln], "GET")
+            logs_list = resp.json_data.get("Members")
+            for log in logs_list:
+                # Lexicographic compare of timestamp strings.
+                if log.get('Created') > curr_time:
+                    msg = BIOS_RESET_COMPLETE
+                    break
+            if msg:
+                break
+            retries = retries - 1
+            time.sleep(intrvl)
+            if retries < pvt:
+                fln = flen
+        else:
+            # while/else: retries exhausted without seeing a new entry.
+            # msg = "{0}{1}".format(BIOS_RESET_TRIGGERED, "LOOPOVER")
+            msg = BIOS_RESET_TRIGGERED
+    except Exception as ex:
+        # Best effort: report the reset as triggered even if tracking failed.
+        # msg = "{0}{1}".format(BIOS_RESET_TRIGGERED, str(ex))
+        msg = BIOS_RESET_TRIGGERED
+    return msg
+
+
def reset_bios(module, redfish_obj):
    """Reset BIOS to factory defaults and reboot the host so the reset executes.

    Always terminates via module.exit_json; never returns to the caller.
    Refuses to run while BIOS attribute changes are still pending.
    """
    if get_pending_attributes(redfish_obj):
        module.exit_json(status_msg=BIOS_RESET_PENDING, failed=True)
    if module.check_mode:
        module.exit_json(status_msg=CHANGES_MSG, changed=True)
    # Stage the factory-default reset, then power-cycle so it takes effect.
    redfish_obj.invoke_request(RESET_BIOS_DEFAULT, "POST", data="{}", dump=True)
    if not reset_host(module, redfish_obj):
        module.exit_json(failed=True, status_msg="{0} {1}".format(RESET_TRIGGERRED, HOST_RESTART_FAILED))
    module.exit_json(status_msg=track_log_entry(redfish_obj), changed=True)
+
+
def clear_pending_bios(module, redfish_obj):
    """Discard pending BIOS attribute changes (and any scheduled config job).

    Always terminates via module.exit_json. A running job blocks the clear;
    a merely scheduled job is deleted instead of POSTing to ClearPending.
    """
    attr = get_pending_attributes(redfish_obj)
    if not attr:
        module.exit_json(status_msg=NO_CHANGES_MSG)
    job_id, job_state = check_scheduled_bios_job(redfish_obj)
    if job_id:
        if job_state in ["Running", "Starting"]:
            # Cannot clear while the BIOS job is actively executing.
            module.exit_json(failed=True, status_msg=BIOS_JOB_RUNNING, job_id=job_id)
        elif job_state in ["Scheduled", "Scheduling"]:
            # Deleting the scheduled job also drops its staged attributes.
            # if module.params.get("force", True) == False:
            #     module.exit_json(status_msg=FORCE_BIOS_DELETE, job_id=job_id, failed=True)
            if module.check_mode:
                module.exit_json(status_msg=CHANGES_MSG, changed=True)
            delete_scheduled_bios_job(redfish_obj, job_id)
            module.exit_json(status_msg=SUCCESS_CLEAR, changed=True)
    if module.check_mode:
        module.exit_json(status_msg=CHANGES_MSG, changed=True)
    # No job holds the pending attributes; clear them directly.
    resp = redfish_obj.invoke_request(CLEAR_PENDING_URI, "POST", data="{}", dump=False)
    module.exit_json(status_msg=SUCCESS_CLEAR, changed=True)
+
+
def get_attributes_registry(idrac):
    """Fetch the BIOS attribute registry keyed by attribute name.

    :return: dict AttributeName -> registry entry; empty dict on any failure
        (registry support is optional, so errors are swallowed deliberately).
    """
    try:
        resp = idrac.invoke_request(BIOS_REGISTRY, "GET")
        attr_list = resp.json_data.get("RegistryEntries").get("Attributes")
        return {entry["AttributeName"]: entry for entry in attr_list}
    except Exception:
        return {}
+
+
def validate_vs_registry(registry, attr_dict):
    """Validate requested BIOS attribute values against the attribute registry.

    Fix: the original bound a local named ``type``, shadowing the builtin;
    renamed to ``attr_type``. Flattened the nesting with guard clauses.

    :param registry: dict mapping AttributeName -> registry entry (as returned
        by get_attributes_registry).
    :param attr_dict: dict of attribute name -> requested value.
    :return: dict of attribute name -> reason string for each invalid request;
        empty when everything validates.
    """
    invalid = {}
    for name, value in attr_dict.items():
        if name not in registry:
            invalid[name] = "The attribute does not exist."
            continue
        entry = registry.get(name)
        if entry.get("ReadOnly"):
            invalid[name] = "Read only attribute cannot be modified."
            continue
        attr_type = entry.get("Type")
        if attr_type == "Enumeration":
            # Value must match one of the registry's declared ValueName options.
            allowed = [option.get("ValueName") for option in entry.get("Value", [])]
            if value not in allowed:
                invalid[name] = "Invalid value for enumeration."
        if attr_type == "Integer":
            try:
                int_value = int(value)
            except Exception:
                invalid[name] = "Invalid integer."
            else:
                if not (entry.get("LowerBound") <= int_value <= entry.get("UpperBound")):
                    invalid[name] = "Integer not in a valid range."
    return invalid
+
+
def get_current_attributes(redfish_obj):
    """Return the current BIOS resource JSON, or an empty dict when the GET fails."""
    try:
        return redfish_obj.invoke_request(BIOS_URI, "GET").json_data
    except Exception:
        return {}
+
+
def validate_time(module, redfish_obj, mtime):
    """Validate a maintenance-window start time against the iDRAC clock.

    Fails the module when mtime does not carry the iDRAC's local UTC offset
    or lies in the past relative to the iDRAC's current time.
    """
    curr_time, date_offset = get_current_time(redfish_obj)
    # NOTE(review): get_current_time returns (None, None) on failure, which
    # would make endswith(None) raise TypeError (caught by main) — confirm intended.
    if not mtime.endswith(date_offset):
        module.exit_json(failed=True, status_msg=MAINTENANCE_OFFSET.format(date_offset))
    # Lexicographic comparison of ISO-style timestamps orders them chronologically.
    if mtime < curr_time:
        module.exit_json(failed=True, status_msg=MAINTENANCE_TIME)
+
+
def get_redfish_apply_time(module, redfish_obj, aplytm, rf_settings):
    """Build the @Redfish.SettingsApplyTime payload for the requested apply time.

    :param aplytm: requested apply_time option value.
    :param rf_settings: SupportedApplyTimes advertised by the BIOS resource.
    :return: tuple (apply-time payload dict, reboot_required flag). The dict is
        empty when the iDRAC advertises no SupportedApplyTimes at all.
    """
    rf_set = {}
    reboot_req = False
    if rf_settings:
        if 'Maintenance' in aplytm:
            if aplytm not in rf_settings:
                module.exit_json(failed=True, status_msg=UNSUPPORTED_APPLY_TIME.format(aplytm))
            else:
                rf_set['ApplyTime'] = aplytm
                m_win = module.params.get('maintenance_window')
                validate_time(module, redfish_obj, m_win.get('start_time'))
                rf_set['MaintenanceWindowStartTime'] = m_win.get('start_time')
                rf_set['MaintenanceWindowDurationInSeconds'] = m_win.get('duration')
        else:  # assuming OnReset is always
            if aplytm == "Immediate":
                if aplytm not in rf_settings:
                    # Immediate unsupported: fall back to OnReset plus our own reboot.
                    reboot_req = True
                    aplytm = 'OnReset'
            rf_set['ApplyTime'] = aplytm
    return rf_set, reboot_req
+
+
def trigger_bios_job(redfish_obj):
    """Create an iDRAC config job targeting the staged BIOS settings.

    :return: the job ID parsed from the Location response header.
    """
    response = redfish_obj.invoke_request(
        IDRAC_JOBS_URI, "POST", data={"TargetSettingsURI": BIOS_SETTINGS})
    return response.headers["Location"].split("/")[-1]
+
+
def apply_attributes(module, redfish_obj, pending, rf_settings):
    """PATCH pending BIOS attributes with the appropriate apply-time semantics.

    When the iDRAC supports SettingsApplyTime the PATCH itself schedules the
    job; otherwise a config job is created explicitly via trigger_bios_job.

    :return: tuple (job_id, reboot_required).
    """
    payload = {"Attributes": pending}
    aplytm = module.params.get('apply_time')
    rf_set, reboot_required = get_redfish_apply_time(module, redfish_obj, aplytm, rf_settings)
    if rf_set:
        payload["@Redfish.SettingsApplyTime"] = rf_set
    resp = redfish_obj.invoke_request(BIOS_SETTINGS, "PATCH", data=payload)
    if rf_set:
        # Job was created implicitly; its ID comes from the Location header.
        tmp_resp = redfish_obj.invoke_request(resp.headers["Location"], "GET")
        job_id = resp.headers["Location"].split("/")[-1]
    else:
        if aplytm == "Immediate":
            reboot_required = True
        job_id = trigger_bios_job(redfish_obj)
    return job_id, reboot_required
+
+
def attributes_config(module, redfish_obj):
    """End-to-end BIOS attribute workflow: diff, validate, stage, apply, track.

    Computes the delta between requested and current attributes, validates it
    against the attribute registry, replaces any merely-scheduled BIOS job,
    applies the settings, and (when a reboot is required) power-cycles the host
    and optionally tracks the job. Always terminates via module.exit_json.
    """
    curr_resp = get_current_attributes(redfish_obj)
    curr_attr = curr_resp.get("Attributes", {})
    inp_attr = module.params.get("attributes")
    diff_tuple = recursive_diff(inp_attr, curr_attr)
    attr = {}
    if diff_tuple:
        if diff_tuple[0]:
            # diff_tuple[0] holds requested values that differ from current ones.
            attr = diff_tuple[0]
            invalid = {}
            attr_registry = get_attributes_registry(redfish_obj)
            if attr_registry:
                invalid.update(validate_vs_registry(attr_registry, attr))
            if invalid:
                module.exit_json(failed=True, status_msg=INVALID_ATTRIBUTES_MSG, invalid_attributes=invalid)
    if not attr:
        module.exit_json(status_msg=NO_CHANGES_MSG)
    if module.check_mode:
        module.exit_json(status_msg=CHANGES_MSG, changed=True)
    # Merge this request into whatever is already staged on the iDRAC.
    pending = get_pending_attributes(redfish_obj)
    pending.update(attr)
    if pending:
        job_id, job_state = check_scheduled_bios_job(redfish_obj)
        if job_id:
            if job_state in ["Running", "Starting"]:
                module.exit_json(status_msg=BIOS_JOB_RUNNING, job_id=job_id, failed=True)
            elif job_state in ["Scheduled", "Scheduling"]:
                # changes staged in pending attributes
                # if module.params.get("force", True) == False:
                #     module.exit_json(status_msg=FORCE_BIOS_DELETE, job_id=job_id, failed=True)
                delete_scheduled_bios_job(redfish_obj, job_id)
    rf_settings = curr_resp.get("@Redfish.Settings", {}).get("SupportedApplyTimes", [])
    job_id, reboot_required = apply_attributes(module, redfish_obj, pending, rf_settings)
    if reboot_required and job_id:
        reset_success = reset_host(module, redfish_obj)
        if not reset_success:
            module.exit_json(status_msg="Attributes committed but reboot has failed {0}".format(HOST_RESTART_FAILED),
                             failed=True)
        if module.params.get("job_wait"):
            job_failed, msg, job_dict, wait_time = idrac_redfish_job_tracking(
                redfish_obj, iDRAC_JOB_URI.format(job_id=job_id),
                max_job_wait_sec=module.params.get('job_wait_timeout'))
            if job_failed:
                module.exit_json(failed=True, status_msg=msg, job_id=job_id)
            module.exit_json(status_msg=SUCCESS_COMPLETE, job_id=job_id, msg=strip_substr_dict(job_dict), changed=True)
        else:
            module.exit_json(status_msg=SCHEDULED_SUCCESS, job_id=job_id, changed=True)
    module.exit_json(status_msg=COMMITTED_SUCCESS.format(module.params.get('apply_time')),
                     job_id=job_id, changed=True)
+
+
def main():
    """Module entry point.

    Dispatches to exactly one workflow: boot_sources (legacy OMSDK path),
    or the Redfish path for clear_pending, reset_bios, and attributes.
    Each workflow terminates the module itself via exit_json/fail_json.
    """
    specs = {
        "share_name": {"type": 'str'},
        "share_user": {"type": 'str'},
        "share_password": {"type": 'str', "aliases": ['share_pwd'], "no_log": True},
        "share_mnt": {"type": 'str'},
        "attributes": {"type": 'dict'},
        "boot_sources": {"type": 'list', 'elements': 'raw'},
        "apply_time": {"type": 'str', "default": 'Immediate',
                       "choices": ['Immediate', 'OnReset', 'AtMaintenanceWindowStart', 'InMaintenanceWindowOnReset']},
        "maintenance_window": {"type": 'dict',
                               "options": {"start_time": {"type": 'str', "required": True},
                                           "duration": {"type": 'int', "required": True}}},
        "clear_pending": {"type": 'bool'},
        "reset_bios": {"type": 'bool'},
        "reset_type": {"type": 'str', "choices": ['graceful_restart', 'force_restart'], "default": 'graceful_restart'},
        "job_wait": {"type": 'bool', "default": True},
        "job_wait_timeout": {"type": 'int', "default": 1200}
    }
    specs.update(idrac_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        mutually_exclusive=[('boot_sources', 'attributes', 'clear_pending', 'reset_bios')],
        required_one_of=[('boot_sources', 'attributes', 'clear_pending', 'reset_bios')],
        required_if=[["apply_time", "AtMaintenanceWindowStart", ("maintenance_window",)],
                     ["apply_time", "InMaintenanceWindowOnReset", ("maintenance_window",)]],
        supports_check_mode=True)
    try:
        msg = {}
        if module.params.get("boot_sources") is not None:
            # Legacy boot-source configuration uses the OMSDK connection.
            with iDRACConnection(module.params) as idrac:
                msg = run_server_bios_config(idrac, module)
            changed, failed = False, False
            if msg.get('Status') == "Success":
                changed = True
                if msg.get('Message') == "No changes found to commit!":
                    changed = False
            elif msg.get('Status') == "Failed":
                failed = True
            module.exit_json(msg=msg, changed=changed, failed=failed)
        else:
            with iDRACRedfishAPI(module.params, req_session=True) as redfish_obj:
                if module.params.get("clear_pending"):
                    clear_pending_bios(module, redfish_obj)
                if module.params.get("reset_bios"):
                    reset_bios(module, redfish_obj)
                if module.params.get('attributes'):
                    attributes_config(module, redfish_obj)
                module.exit_json(status_msg=NO_CHANGES_MSG)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Unreachable iDRAC is reported as unreachable, not failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
            ImportError, ValueError, TypeError) as e:
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_boot.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_boot.py
new file mode 100644
index 000000000..ad563c5ce
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_boot.py
@@ -0,0 +1,563 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 6.1.0
+# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_boot
+short_description: Configure the boot order settings.
+version_added: "6.1.0"
+description:
+ - This module allows to configure the boot order settings.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ boot_options:
+ type: list
+ elements: dict
+ description:
+ - Options to enable or disable the boot devices.
+ - This is mutually exclusive with I(boot_order), I(boot_source_override_mode), I(boot_source_override_enabled)
+ I(boot_source_override_target), and I(uefi_target_boot_source_override).
+ suboptions:
+ boot_option_reference:
+ type: str
+ description:
+ - FQDD of the boot device.
+ - This is mutually exclusive with I(display_name).
+ display_name:
+ type: str
+ description:
+ - Display name of the boot source device.
+ - This is mutually exclusive with I(boot_option_reference).
+ enabled:
+ type: bool
+ required: true
+ description: Enable or disable the boot device.
+ boot_order:
+ type: list
+ elements: str
+ description:
+ - This option allows to set the boot devices in the required boot order sequences.
+ - This is mutually exclusive with I(boot_options).
+ boot_source_override_mode:
+ type: str
+ description:
+ - The BIOS boot mode (either Legacy or UEFI) to be used when I(boot_source_override_target)
+ boot source is booted from.
+ - C(legacy) The system boot in non-UEFI(Legacy) boot mode to the I(boot_source_override_target).
+ - C(uefi) The system boot in UEFI boot mode to the I(boot_source_override_target).
+ - This is mutually exclusive with I(boot_options).
+ choices: [legacy, uefi]
+ boot_source_override_enabled:
+ type: str
+ description:
+ - The state of the Boot Source Override feature.
+ - C(disabled) The system boots normally.
+ - C(once) The system boots (one time) to the I(boot_source_override_target).
+ - C(continuous) The system boots to the target specified in the I(boot_source_override_target)
+ until this property is set to Disabled.
+ - The state is set to C(once) for the one-time boot override and C(continuous) for the
+ remain-active-until—canceled override. If the state is set C(once), the value is reset
+ to C(disabled) after the I(boot_source_override_target) actions have completed successfully.
+ - Changes to this options do not alter the BIOS persistent boot order configuration.
+ - This is mutually exclusive with I(boot_options).
+ choices: [continuous, disabled, once]
+ boot_source_override_target:
+ type: str
+ description:
+ - The boot source override target device to use during the next boot instead of the normal boot device.
+ - C(pxe) performs PXE boot from the primary NIC.
+ - C(floppy), C(cd), C(hdd), C(sd_card) performs boot from their devices respectively.
+ - C(bios_setup) performs boot into the native BIOS setup.
+ - C(utilities) performs boot from the local utilities.
+ - C(uefi_target) performs boot from the UEFI device path found in I(uefi_target_boot_source_override).
+ - If the I(boot_source_override_target) is set to a value other than C(none) then the
+ I(boot_source_override_enabled) is automatically set to C(once).
+ - Changes to this options do not alter the BIOS persistent boot order configuration.
+ - This is mutually exclusive with I(boot_options).
+ choices: [uefi_http, sd_card, uefi_target, utilities, bios_setup, hdd, cd, floppy, pxe, none]
+ uefi_target_boot_source_override:
+ type: str
+ description:
+ - The UEFI device path of the device from which to boot when I(boot_source_override_target) is C(uefi_target).
+ - I(boot_source_override_enabled) cannot be set to C(continuous) if I(boot_source_override_target)
+ is set to C(uefi_target) because this setting is defined in UEFI as a one-time-boot setting.
+ - Changes to this options do not alter the BIOS persistent boot order configuration.
+ - This is required if I(boot_source_override_target) is C(uefi_target).
+ - This is mutually exclusive with I(boot_options).
+ reset_type:
+ type: str
+ description:
+ - C(none) Host system is not rebooted and I(job_wait) is not applicable.
+ - C(force_restart) Forcefully reboot the Host system.
+ - C(graceful_restart) Gracefully reboot the Host system.
+ choices: [graceful_restart, force_restart, none]
+ default: graceful_restart
+ job_wait:
+ type: bool
+ description:
+ - Provides the option to wait for job completion.
+ - This is applicable when I(reset_type) is C(force_restart) or C(graceful_restart).
+ default: true
+ job_wait_timeout:
+ type: int
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(True).
+ default: 900
+ resource_id:
+ type: str
+ description: Redfish ID of the resource.
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports C(check_mode).
+"""
+
+
+EXAMPLES = """
+---
+- name: Configure the system boot options settings.
+ dellemc.openmanage.idrac_boot:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_options:
+ - display_name: Hard drive C
+ enabled: true
+ - boot_option_reference: NIC.PxeDevice.2-1
+ enabled: true
+
+- name: Configure the boot order settings.
+ dellemc.openmanage.idrac_boot:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_order:
+ - Boot0001
+ - Boot0002
+ - Boot0004
+ - Boot0003
+
+- name: Configure the boot source override mode.
+ dellemc.openmanage.idrac_boot:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_source_override_mode: legacy
+ boot_source_override_target: cd
+ boot_source_override_enabled: once
+
+- name: Configure the UEFI target settings.
+ dellemc.openmanage.idrac_boot:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_source_override_mode: uefi
+ boot_source_override_target: uefi_target
+ uefi_target_boot_source_override: "VenHw(3A191845-5F86-4E78-8FCE-C4CFF59F9DAA)"
+
+- name: Configure the boot source override mode as pxe.
+ dellemc.openmanage.idrac_boot:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_source_override_mode: legacy
+ boot_source_override_target: pxe
+ boot_source_override_enabled: continuous
+"""
+
+
+RETURN = r'''
+---
+msg:
+ description: Successfully updated the boot settings.
+ returned: success
+ type: str
+ sample: Successfully updated the boot settings.
+job:
+ description: Configured job details.
+ returned: success
+ type: dict
+ sample: {
+ "ActualRunningStartTime": "2019-06-19T00:57:24",
+ "ActualRunningStopTime": "2019-06-19T01:00:27",
+ "CompletionTime": "2019-06-19T01:00:27",
+ "Description": "Job Instance",
+ "EndTime": "TIME_NA",
+ "Id": "JID_609237056489",
+ "JobState": "Completed",
+ "JobType": "BIOSConfiguration",
+ "Message": "Job completed successfully.",
+ "MessageArgs": [],
+ "MessageId": "PR19",
+ "Name": "Configure: BIOS.Setup.1-1",
+ "PercentComplete": 100,
+ "StartTime": "2019-06-19T00:55:05",
+ "TargetSettingsURI": null }
+boot:
+ description: Configured boot settings details.
+ returned: success
+ type: dict
+ sample: {
+ "BootOptions": {
+ "Description": "Collection of BootOptions",
+ "Members": [{
+ "BootOptionEnabled": false,
+ "BootOptionReference": "HardDisk.List.1-1",
+ "Description": "Current settings of the Legacy Boot option",
+ "DisplayName": "Hard drive C:",
+ "Id": "HardDisk.List.1-1",
+ "Name": "Legacy Boot option",
+ "UefiDevicePath": "VenHw(D6C0639F-C705-4EB9-AA4F-5802D8823DE6)"}],
+ "Name": "Boot Options Collection"
+ },
+ "BootOrder": [ "HardDisk.List.1-1"],
+ "BootSourceOverrideEnabled": "Disabled",
+ "BootSourceOverrideMode": "Legacy",
+ "BootSourceOverrideTarget": "None",
+ "UefiTargetBootSourceOverride": null }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+
+import json
+import time
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import (strip_substr_dict, idrac_system_reset,
+ get_system_res_id,
+ wait_for_idrac_job_completion)
+from ansible.module_utils.basic import AnsibleModule
+
# Redfish URI templates used throughout this module.
SYSTEM_URI = "/redfish/v1/Systems"
BOOT_OPTIONS_URI = "/redfish/v1/Systems/{0}/BootOptions?$expand=*($levels=1)"
JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs?$expand=*($levels=1)"
JOB_URI_ID = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{0}"
BOOT_SEQ_URI = "/redfish/v1/Systems/{0}/BootSources"
PATCH_BOOT_SEQ_URI = "/redfish/v1/Systems/{0}/BootSources/Settings"

# User-facing status and error messages.
NO_CHANGES_MSG = "No changes found to be applied."
CHANGES_MSG = "Changes found to be applied."
JOB_EXISTS = "Unable to complete the request because the BIOS configuration job already " \
             "exists. Wait for the pending job to complete."
BOOT_OPT_ERROR_MSG = "{0} boot_options provided."
INVALID_BOOT_OPT = "{0} boot order reference provided."
SUCCESS_MSG = "Successfully updated the boot settings."
FAILED_MSG = "Failed to update the boot settings."
UNSUPPORTED_MSG = "The system does not support the BootOptions feature."
JOB_WAIT_MSG = "The boot settings job is triggered successfully."
AUTH_ERROR_MSG = "Unable to communicate with iDRAC {0}. This may be due to one of the following: " \
                 "Incorrect username or password, unreachable iDRAC IP or a failure in TLS/SSL handshake."

# Maps from module option values to the corresponding Redfish enum values.
BS_OVERRIDE_MODE = {"legacy": "Legacy", "uefi": "UEFI"}
BS_OVERRIDE_ENABLED = {"continuous": "Continuous", "disabled": "Disabled", "once": "Once"}
BS_OVERRIDE_TARGET = {"none": "None", "pxe": "Pxe", "floppy": "Floppy", "cd": "Cd",
                      "hdd": "Hdd", "bios_setup": "BiosSetup", "utilities": "Utilities",
                      "uefi_target": "UefiTarget", "sd_card": "SDCard", "uefi_http": "UefiHttp"}
# NOTE: get_response_attributes mutates "graceful_restart" at runtime when the
# system does not advertise GracefulRestart.
RESET_TYPE = {"graceful_restart": "GracefulRestart", "force_restart": "ForceRestart", "none": None}
+
+
def get_response_attributes(module, idrac, res_id):
    """Fetch the system's Boot attributes with OData noise removed.

    Fails the module when boot_options were requested but the system exposes
    no BootOptions. Side effect: downgrades the module-level RESET_TYPE map's
    "graceful_restart" entry to "ForceRestart" when the system does not
    advertise GracefulRestart as an allowed reset type.
    """
    resp = idrac.invoke_request("{0}/{1}".format(SYSTEM_URI, res_id), "GET")
    resp_data = resp.json_data["Boot"]
    # Drop metadata keys that are not part of the comparable boot state.
    resp_data.pop("Certificates", None)
    resp_data.pop("BootOrder@odata.count", None)
    resp_data.pop("BootSourceOverrideTarget@Redfish.AllowableValues", None)
    if resp_data.get("BootOptions") is None and module.params.get("boot_options") is not None:
        module.fail_json(msg=UNSUPPORTED_MSG)
    if resp.json_data.get("Actions") is not None:
        type_reset = resp.json_data["Actions"]["#ComputerSystem.Reset"]["ResetType@Redfish.AllowableValues"]
        if "GracefulRestart" not in type_reset:
            RESET_TYPE["graceful_restart"] = "ForceRestart"
    return resp_data
+
+
def get_existing_boot_options(idrac, res_id):
    """Fetch the expanded BootOptions collection with OData keys stripped.

    :param idrac: active iDRAC Redfish session.
    :param res_id: Redfish system resource ID.
    :return: the collection dict, with every member also stripped.
    """
    resp = idrac.invoke_request(BOOT_OPTIONS_URI.format(res_id), "GET")
    collection = strip_substr_dict(resp.json_data)
    collection["Members"] = [strip_substr_dict(member) for member in collection["Members"]]
    return collection
+
+
def system_reset(module, idrac, res_id):
    """Reboot the host per the reset_type option.

    :return: tuple (reset_ok, track_failed, reset_msg, job_resp). When
        reset_type is 'none' (or absent) nothing is done and reset_ok is True.
    """
    reset_msg, track_failed, reset, reset_type, job_resp = "", False, True, module.params.get("reset_type"), {}
    if reset_type is not None and not reset_type == "none":
        data = {"ResetType": RESET_TYPE[reset_type]}
        reset, track_failed, reset_msg, job_resp = idrac_system_reset(idrac, res_id, payload=data, job_wait=True)
        # When GracefulRestart was downgraded to ForceRestart (see
        # get_response_attributes), or a true force/graceful restart was issued,
        # treat the reset as successful regardless of the tracked outcome.
        if RESET_TYPE["graceful_restart"] == "ForceRestart":
            reset = True
        if reset_type == "force_restart" and RESET_TYPE["graceful_restart"] == "GracefulRestart":
            reset = True
    return reset, track_failed, reset_msg, job_resp
+
+
def get_scheduled_job(idrac, job_state=None):
    """Look for BIOSConfiguration jobs currently in one of the given states.

    :param idrac: active iDRAC Redfish session.
    :param job_state: states to match; defaults to Scheduled/New/Running.
    :return: tuple (job_found, matching_jobs).
    """
    if job_state is None:
        job_state = ["Scheduled", "New", "Running"]
    time.sleep(10)  # give a freshly created job time to appear in the queue
    members = idrac.invoke_request(JOB_URI, "GET").json_data["Members"]
    matching = [job for job in members
                if job.get("JobType") == "BIOSConfiguration"
                and job.get("JobState") in job_state]
    return bool(matching), matching
+
+
def configure_boot_options(module, idrac, res_id, payload):
    """Enable/disable boot devices via the BootSources Settings resource.

    :param payload: dict mapping boot-option ID -> enabled flag.
    :return: the tracked job's json_data (empty dict when no job was tracked).
    Fails the module when a BIOS job is already queued or the reboot fails.
    """
    is_job, progress_job = get_scheduled_job(idrac)
    job_data, job_wait = {}, module.params["job_wait"]
    resp_data = get_response_attributes(module, idrac, res_id)
    override_mode = resp_data["BootSourceOverrideMode"]
    if module.params["reset_type"] == "none":
        # Without a reboot the job cannot run, so do not wait on it.
        job_wait = False
    if is_job:
        module.fail_json(msg=JOB_EXISTS)
    boot_seq_resp = idrac.invoke_request(BOOT_SEQ_URI.format(res_id), "GET")
    # Legacy and UEFI modes store the sequence under different attribute keys.
    seq_key = "BootSeq" if override_mode == "Legacy" else "UefiBootSeq"
    boot_seq_data = boot_seq_resp.json_data["Attributes"][seq_key]
    # Merge the requested Enabled flags into the existing sequence (keyed by Name).
    [each.update({"Enabled": payload.get(each["Name"])}
                 ) for each in boot_seq_data if payload.get(each["Name"]) is not None]
    seq_payload = {"Attributes": {seq_key: boot_seq_data}, "@Redfish.SettingsApplyTime": {"ApplyTime": "OnReset"}}
    if seq_key == "UefiBootSeq":
        # UEFI entries are keyed by BootOrder reference rather than Name.
        for i in range(len(boot_seq_data)):
            if payload.get(resp_data["BootOrder"][i]) is not None:
                boot_seq_data[i].update({"Enabled": payload.get(resp_data["BootOrder"][i])})
        seq_payload["Attributes"][seq_key] = boot_seq_data
    resp = idrac.invoke_request(PATCH_BOOT_SEQ_URI.format(res_id), "PATCH", data=seq_payload)
    if resp.status_code == 202:
        # 202 Accepted: a config job was created; reboot so it executes.
        location = resp.headers["Location"]
        job_id = location.split("/")[-1]
        reset, track_failed, reset_msg, reset_job_resp = system_reset(module, idrac, res_id)
        if reset_job_resp:
            job_data = reset_job_resp.json_data
        if reset:
            job_resp, error_msg = wait_for_idrac_job_completion(idrac, JOB_URI_ID.format(job_id),
                                                               job_wait=job_wait,
                                                               wait_timeout=module.params["job_wait_timeout"])
            if error_msg:
                module.fail_json(msg=error_msg)
            job_data = job_resp.json_data
        else:
            module.fail_json(msg=reset_msg)
    return job_data
+
+
def apply_boot_settings(module, idrac, payload, res_id):
    """PATCH the system Boot payload, reboot, and track the resulting job.

    :param payload: {"Boot": {...}} dict of changed boot settings.
    :return: the tracked job's json_data (empty dict when nothing was tracked).
    Fails the module when the reboot fails or the job errors out.
    """
    job_data, job_wait = {}, module.params["job_wait"]
    if module.params["reset_type"] == "none":
        # No reboot means the job will not run now, so do not wait on it.
        job_wait = False
    resp = idrac.invoke_request("{0}/{1}".format(SYSTEM_URI, res_id), "PATCH", data=payload)
    if resp.status_code == 200:
        reset, track_failed, reset_msg, reset_job_resp = system_reset(module, idrac, res_id)
        if reset_job_resp:
            job_data = reset_job_resp.json_data
        # The PATCH schedules a BIOSConfiguration job; find it in the queue.
        is_job, progress_job = get_scheduled_job(idrac)
        if is_job:
            if reset:
                job_resp, error_msg = wait_for_idrac_job_completion(idrac, JOB_URI_ID.format(progress_job[0]["Id"]),
                                                                   job_wait=job_wait,
                                                                   wait_timeout=module.params["job_wait_timeout"])
                if error_msg:
                    module.fail_json(msg=error_msg)
                job_data = job_resp.json_data
            else:
                module.fail_json(msg=reset_msg)
    return job_data
+
+
def configure_boot_settings(module, idrac, res_id):
    """Build and apply the Boot payload for boot order / override options.

    Validates the requested boot_order against the existing one (no unknown,
    duplicate, or missing devices), adds only the settings that actually
    differ from the current state, and applies them. Exits early in check
    mode or when nothing changed.

    :return: job response data from apply_boot_settings.
    """
    job_resp, diff_change, payload = {}, [], {"Boot": {}}
    boot_order = module.params.get("boot_order")
    override_mode = module.params.get("boot_source_override_mode")
    override_enabled = module.params.get("boot_source_override_enabled")
    override_target = module.params.get("boot_source_override_target")
    response = get_response_attributes(module, idrac, res_id)
    if boot_order is not None:
        exist_boot_order = response.get("BootOrder")
        invalid_boot_order = [bo for bo in boot_order if bo not in exist_boot_order]
        if invalid_boot_order:
            module.fail_json(msg=INVALID_BOOT_OPT.format("Invalid"), invalid_boot_order=invalid_boot_order)
        if not len(set(boot_order)) == len(boot_order):
            # Report exactly the duplicated references.
            dup_order = boot_order[:]
            [dup_order.remove(bo) for bo in exist_boot_order if bo in dup_order]
            module.fail_json(msg=INVALID_BOOT_OPT.format("Duplicate"),
                             duplicate_boot_order=dup_order)
        if not len(boot_order) == len(exist_boot_order):
            module.fail_json(msg="Unable to complete the operation because all boot devices "
                                 "are required for this operation.")
        if not boot_order == exist_boot_order:
            payload["Boot"].update({"BootOrder": boot_order})
    # Include each override setting only when it differs from the current value.
    if override_mode is not None and \
            (not BS_OVERRIDE_MODE.get(override_mode) == response.get("BootSourceOverrideMode")):
        payload["Boot"].update({"BootSourceOverrideMode": BS_OVERRIDE_MODE.get(override_mode)})
    if override_enabled is not None and \
            (not BS_OVERRIDE_ENABLED.get(override_enabled) == response.get("BootSourceOverrideEnabled")):
        payload["Boot"].update({"BootSourceOverrideEnabled": BS_OVERRIDE_ENABLED.get(override_enabled)})
    if override_target is not None and \
            (not BS_OVERRIDE_TARGET.get(override_target) == response.get("BootSourceOverrideTarget")):
        payload["Boot"].update({"BootSourceOverrideTarget": BS_OVERRIDE_TARGET.get(override_target)})
    uefi_override_target = module.params.get("uefi_target_boot_source_override")
    if override_target == "uefi_target" and not uefi_override_target == response.get("UefiTargetBootSourceOverride"):
        payload["Boot"].update({"UefiTargetBootSourceOverride": uefi_override_target})
    if module.check_mode and payload["Boot"]:
        module.exit_json(msg=CHANGES_MSG, changed=True)
    elif (module.check_mode or not module.check_mode) and not payload["Boot"]:
        module.exit_json(msg=NO_CHANGES_MSG)
    else:
        job_resp = apply_boot_settings(module, idrac, payload, res_id)
    return job_resp
+
+
def configure_idrac_boot(module, idrac, res_id):
    """Dispatch to the boot_options flow or the boot-settings flow.

    For boot_options, matches each request against existing BootOptions by
    DisplayName or BootOptionReference, builds an ID -> enabled payload, and
    computes whether any enabled flag actually changes.

    :return: job response data from the chosen flow.
    """
    boot_options = module.params.get("boot_options")
    inv_boot_options, diff_change, payload, job_resp, boot_attr = [], [], {}, {}, {}
    if boot_options is not None:
        boot_option_data = get_existing_boot_options(idrac, res_id)
        for each in boot_options:
            # Prefer display_name when provided; fall back to boot_option_reference.
            attr_val = each["display_name"] if each.get("display_name") is not None else each.get("boot_option_reference")
            attr_key = "DisplayName" if each.get("display_name") is not None else "BootOptionReference"
            report = list(filter(lambda d: d[attr_key] in [attr_val], boot_option_data["Members"]))
            if not report:
                inv_boot_options.append(each)
            else:
                act_val = {"BootOptionEnabled": each["enabled"]}
                ext_val = {"BootOptionEnabled": report[0]["BootOptionEnabled"]}
                # True when the requested flag differs from the current one.
                diff_change.append(bool(set(ext_val.items()) ^ set(act_val.items())))
                payload[report[0]["Id"]] = each["enabled"]
        if inv_boot_options:
            module.fail_json(msg=BOOT_OPT_ERROR_MSG.format("Invalid"), invalid_boot_options=inv_boot_options)
        if not len(payload) == len(boot_options):
            # Two requests resolved to the same device ID.
            module.fail_json(msg=BOOT_OPT_ERROR_MSG.format("Duplicate"), duplicate_boot_options=boot_options)
        if module.check_mode and any(diff_change) is True:
            module.exit_json(msg=CHANGES_MSG, changed=True)
        elif (module.check_mode and all(diff_change) is False) or (not module.check_mode and not any(diff_change)):
            module.exit_json(msg=NO_CHANGES_MSG)
        else:
            job_resp = configure_boot_options(module, idrac, res_id, payload)
    else:
        job_resp = configure_boot_settings(module, idrac, res_id)
    return job_resp
+
+
def main():
    """Module entry point: resolve the system resource, configure boot, report.

    Exits with the final job/boot state; reports failure when the tracked job
    failed, and 'job triggered' when no wait was requested or possible.
    """
    specs = {
        "boot_options": {
            "required": False, "type": "list", "elements": "dict",
            "options": {
                "boot_option_reference": {"required": False, "type": "str"},
                "display_name": {"required": False, "type": "str"},
                "enabled": {"required": True, "type": "bool"},
            },
            "mutually_exclusive": [("boot_option_reference", "display_name")],
            "required_one_of": [("boot_option_reference", "display_name")],
        },
        "boot_order": {"required": False, "type": "list", "elements": "str"},
        "boot_source_override_mode": {"required": False, "type": "str", "choices": ["legacy", "uefi"]},
        "boot_source_override_enabled": {"required": False, "type": "str",
                                         "choices": ["continuous", "disabled", "once"]},
        "boot_source_override_target": {"required": False, "type": "str",
                                        "choices": ["uefi_http", "sd_card", "uefi_target", "utilities", "bios_setup",
                                                    "hdd", "cd", "floppy", "pxe", "none"]},
        "uefi_target_boot_source_override": {"required": False, "type": "str"},
        "reset_type": {"required": False, "type": "str", "default": "graceful_restart",
                       "choices": ["graceful_restart", "force_restart", "none"]},
        "job_wait": {"required": False, "type": "bool", "default": True},
        "job_wait_timeout": {"required": False, "type": "int", "default": 900},
        "resource_id": {"required": False, "type": "str"}
    }
    specs.update(idrac_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_one_of=[["boot_options", "boot_order", "boot_source_override_mode",
                          "boot_source_override_enabled", "boot_source_override_target",
                          "uefi_target_boot_source_override"]],
        mutually_exclusive=[
            ("boot_options", "boot_order"), ("boot_options", "boot_source_override_mode"),
            ("boot_options", "boot_source_override_enabled"), ("boot_options", "boot_source_override_target"),
            ("boot_options", "uefi_target_boot_source_override")
        ],
        required_if=[
            ["boot_source_override_target", "uefi_target", ("uefi_target_boot_source_override",)],
        ],
        supports_check_mode=True,
    )
    try:
        with iDRACRedfishAPI(module.params, req_session=True) as idrac:
            res_id = module.params.get("resource_id")
            if not res_id:
                # Discover the single system resource ID when not supplied.
                res_id, error_msg = get_system_res_id(idrac)
                if error_msg:
                    module.fail_json(msg=error_msg)
            job_resp = configure_idrac_boot(module, idrac, res_id)
            job_resp_data = strip_substr_dict(job_resp)
            boot_option_data = get_existing_boot_options(idrac, res_id)
            boot_attr = get_response_attributes(module, idrac, res_id)
            boot_attr["BootOptions"] = boot_option_data
            if job_resp_data and \
                    (job_resp_data.get("JobState") in ["Failed", "RebootFailed"] or
                     "failed" in job_resp_data.get("Message").lower()):
                module.fail_json(msg=FAILED_MSG, job=job_resp_data)
            if (not module.params["job_wait"] or module.params["reset_type"] == "none") and \
                    not job_resp_data.get("JobState") == "RebootCompleted":
                # Job was created but not awaited to completion.
                module.exit_json(msg=JOB_WAIT_MSG, job=job_resp_data, boot=boot_attr)
            module.exit_json(msg=SUCCESS_MSG, job=job_resp_data, boot=boot_attr, changed=True)
    except HTTPError as err:
        if err.code == 401:
            module.fail_json(msg=AUTH_ERROR_MSG.format(module.params["idrac_ip"]))
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=AUTH_ERROR_MSG.format(module.params["idrac_ip"]), unreachable=True)
    except (ImportError, ValueError, RuntimeError, SSLValidationError,
            ConnectionError, KeyError, TypeError, IndexError) as e:
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py
new file mode 100644
index 000000000..f5471a3ad
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py
@@ -0,0 +1,521 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.5.0
+# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: idrac_certificates
+short_description: Configure certificates for iDRAC
+version_added: "5.5.0"
+description:
+ - This module allows to generate certificate signing request, import, and export certificates on iDRAC.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ command:
+ description:
+ - "C(generate_csr), generate CSR. This requires I(cert_params) and I(certificate_path).
+ This is applicable only for C(HTTPS)"
+ - C(import), import the certificate file. This requires I(certificate_path).
+ - C(export), export the certificate. This requires I(certificate_path).
+ - C(reset), reset the certificate to default settings. This is applicable only for C(HTTPS).
+ type: str
+ choices: ['import', 'export', 'generate_csr', 'reset']
+ default: 'generate_csr'
+ certificate_type:
+ description:
+ - Type of the iDRAC certificate.
+ - C(HTTPS) The Dell self-signed SSL certificate.
+ - C(CA) Certificate Authority(CA) signed SSL certificate.
+ - C(CSC) The custom signed SSL certificate.
+ - C(CLIENT_TRUST_CERTIFICATE) Client trust certificate.
+ type: str
+ choices: ['HTTPS', 'CA', 'CSC', 'CLIENT_TRUST_CERTIFICATE']
+ default: 'HTTPS'
+ certificate_path:
+ description:
+ - Absolute path of the certificate file if I(command) is C(import).
+ - Directory path with write permissions if I(command) is C(generate_csr) or C(export).
+ type: path
+ passphrase:
+ description: The passphrase string if the certificate to be imported is passphrase protected.
+ type: str
+ cert_params:
+ description: Certificate parameters to generate signing request.
+ type: dict
+ suboptions:
+ common_name:
+ description: The common name of the certificate.
+ type: str
+ required: True
+ organization_unit:
+ description: The name associated with an organizational unit. For example department name.
+ type: str
+ required: True
+ locality_name:
+ description: The city or other location where the entity applying for certification is located.
+ type: str
+ required: True
+ state_name:
+ description: The state where the entity applying for certification is located.
+ type: str
+ required: True
+ country_code:
+ description: The country code of the country where the entity applying for certification is located.
+ type: str
+ required: True
+ email_address:
+ description: The email associated with the CSR.
+ type: str
+ required: True
+ organization_name:
+ description: The name associated with an organization.
+ type: str
+ required: True
+ subject_alt_name:
+ description: The alternative domain names associated with the request.
+ type: list
+ elements: str
+ default: []
+ resource_id:
+ description: Redfish ID of the resource.
+ type: str
+ reset:
+ description:
+ - To reset the iDRAC after the certificate operation.
+ - This is applicable when I(command) is C(import) or C(reset).
+ type: bool
+ default: True
+ wait:
+ description:
+ - Maximum wait time for iDRAC to start after the reset, in seconds.
+ - This is applicable when I(command) is C(import) or C(reset) and I(reset) is C(True).
+ type: int
+ default: 300
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Jagadeesh N V(@jagadeeshnv)"
+notes:
+ - The certificate operations are supported on iDRAC firmware 5.10.10.00 and above.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Generate HTTPS certificate signing request
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "generate_csr"
+ certificate_type: "HTTPS"
+ certificate_path: "/home/omam/mycerts"
+ cert_params:
+ common_name: "sample.domain.com"
+ organization_unit: "OrgUnit"
+ locality_name: "Bangalore"
+ state_name: "Karnataka"
+ country_code: "IN"
+ email_address: "admin@domain.com"
+ organization_name: "OrgName"
+ subject_alt_name:
+ - 192.198.2.1
+
+- name: Import a HTTPS certificate.
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "import"
+ certificate_type: "HTTPS"
+ certificate_path: "/path/to/cert.pem"
+
+- name: Export a HTTPS certificate.
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "export"
+ certificate_type: "HTTPS"
+ certificate_path: "/home/omam/mycert_dir"
+
+- name: Import a CSC certificate.
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "import"
+ certificate_type: "CSC"
+ certificate_path: "/path/to/cert.pem"
+
+- name: Export a Client trust certificate.
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "export"
+ certificate_type: "CLIENT_TRUST_CERTIFICATE"
+ certificate_path: "/home/omam/mycert_dir"
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Status of the certificate configuration operation.
+ returned: always
+ sample: "Successfully performed the operation generate_csr."
+certificate_path:
+ type: str
+ description: The CSR or exported certificate file path.
+ returned: when I(command) is C(export) or C(generate_csr)
+ sample: "/home/ansible/myfiles/cert.pem"
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+import base64
+import os
+from datetime import datetime
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import reset_idrac
+
+NOT_SUPPORTED_ACTION = "Certificate {op} not supported for the specified certificate type {certype}."
+SUCCESS_MSG = "Successfully performed the '{command}' operation."
+NO_CHANGES_MSG = "No changes found to be applied."
+CHANGES_MSG = "Changes found to be applied."
+SYSTEM_ID = "System.Embedded.1"
+MANAGER_ID = "iDRAC.Embedded.1"
+ACTIONS_PFIX = "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService."
+SYSTEMS_URI = "/redfish/v1/Systems"
+MANAGERS_URI = "/redfish/v1/Managers"
+IDRAC_SERVICE = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService"
+CSR_SSL = "/redfish/v1/CertificateService/Actions/CertificateService.GenerateCSR"
+IMPORT_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.ImportSSLCertificate"
+EXPORT_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.ExportSSLCertificate"
+RESET_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.SSLResetCfg"
+IDRAC_RESET = "/redfish/v1/Managers/{res_id}/Actions/Manager.Reset"
+
+idrac_service_actions = {
+ "#DelliDRACCardService.DeleteCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.DeleteCertificate",
+ "#DelliDRACCardService.ExportCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ExportCertificate",
+ "#DelliDRACCardService.ExportSSLCertificate": EXPORT_SSL,
+ "#DelliDRACCardService.FactoryIdentityCertificateGenerateCSR":
+ "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityCertificateGenerateCSR",
+ "#DelliDRACCardService.FactoryIdentityExportCertificate":
+ "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityExportCertificate",
+ "#DelliDRACCardService.FactoryIdentityImportCertificate":
+ "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityImportCertificate",
+ "#DelliDRACCardService.GenerateSEKMCSR": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.GenerateSEKMCSR",
+ "#DelliDRACCardService.ImportCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ImportCertificate",
+ "#DelliDRACCardService.ImportSSLCertificate": IMPORT_SSL,
+ "#DelliDRACCardService.SSLResetCfg": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.SSLResetCfg",
+ "#DelliDRACCardService.iDRACReset": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.iDRACReset"
+}
+
+rfish_cert_coll = {'Server': {
+ "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/NetworkProtocol/HTTPS/Certificates"
+}}
+out_mapper = {}
+out_file_path = {"CSRString": 'certificate_path',
+ "CertificateFile": 'certificate_path'}
+changed_map = {"generate_csr": False, "import": True, "export": False, "reset": True}
+# reset_map = {"generate_csr": False, "import": True, "export": False, "reset": True}
+csr_transform = {"common_name": "CommonName",
+ "organization_unit": "OrganizationalUnit",
+ "locality_name": 'City',
+ "state_name": 'State',
+ "country_code": "Country",
+ "email_address": 'Email',
+ "organization_name": "Organization",
+ "subject_alt_name": 'AlternativeNames'}
+action_url_map = {"generate_csr": {},
+ "import": {'Server': "#DelliDRACCardService.ImportSSLCertificate",
+ 'CA': "#DelliDRACCardService.ImportSSLCertificate",
+ 'CSC': "#DelliDRACCardService.ImportSSLCertificate",
+ 'ClientTrustCertificate': "#DelliDRACCardService.ImportSSLCertificate"},
+ "export": {'Server': "#DelliDRACCardService.ExportSSLCertificate",
+ 'CA': "#DelliDRACCardService.ExportSSLCertificate",
+ 'CSC': "#DelliDRACCardService.ExportSSLCertificate",
+ 'ClientTrustCertificate': "#DelliDRACCardService.ExportSSLCertificate"},
+ "reset": {'Server': "#DelliDRACCardService.SSLResetCfg"}}
+
+dflt_url_map = {"generate_csr": {'Server': CSR_SSL},
+ "import": {'Server': IMPORT_SSL,
+ 'CA': IMPORT_SSL,
+ 'CSC': IMPORT_SSL,
+ 'ClientTrustCertificate': IMPORT_SSL},
+ "export": {'Server': EXPORT_SSL,
+ 'CA': EXPORT_SSL,
+ 'CSC': EXPORT_SSL,
+ 'ClientTrustCertificate': EXPORT_SSL},
+ "reset": {'Server': RESET_SSL}}
+certype_map = {'HTTPS': "Server", 'CA': "CA", 'CSC': "CSC",
+ 'CLIENT_TRUST_CERTIFICATE': "ClientTrustCertificate"}
+
+
def get_ssl_payload(module, op, certype):
    """Build the (payload, method) pair for an SSL certificate operation.

    :param module: AnsibleModule instance; reads certificate_path, passphrase
        and cert_params from module.params.
    :param op: one of 'import', 'export', 'generate_csr', 'reset'.
    :param certype: Redfish certificate type, e.g. 'Server', 'CA', 'CSC'.
    :returns: tuple of (payload dict, HTTP method). For 'reset' the payload is
        the pre-serialised string "{}".
    """
    payload = {}
    method = 'POST'
    if op == 'import':
        payload["CertificateType"] = certype
        if module.params.get('passphrase'):
            payload['Passphrase'] = module.params.get('passphrase')
        fpath = module.params.get('certificate_path')
        try:
            if str(fpath).lower().endswith('.p12') or str(fpath).lower().endswith(
                    '.pfx'):  # Linux generates .p12, Windows generates .pfx; binary, so base64-encode
                with open(fpath, 'rb') as cert:
                    cert_content = cert.read()
                cert_file = base64.encodebytes(cert_content).decode('ascii')
            else:
                # PEM and other text formats are sent as-is.
                with open(fpath, "r") as cert:
                    cert_file = cert.read()
        except OSError as file_err:
            # Unreadable/missing certificate file: report and stop.
            module.exit_json(msg=str(file_err), failed=True)
        payload['SSLCertificateFile'] = cert_file
    elif op == 'export':
        payload['SSLCertType'] = certype
    elif op == 'generate_csr':
        payload = {}
        cert_params = module.params.get("cert_params")
        # Map module option names to the Redfish CSR attribute names.
        for k, v in csr_transform.items():
            payload[v] = cert_params.get(k)
        if rfish_cert_coll.get(certype):
            payload["CertificateCollection"] = rfish_cert_coll.get(certype)
    elif op == 'reset':
        # SSLResetCfg expects an empty JSON body (sent with dump=False).
        payload = "{}"
    return payload, method
+
+
+payload_map = {"Server": get_ssl_payload,
+ "CA": get_ssl_payload,
+ "CSC": get_ssl_payload,
+ "ClientTrustCertificate": get_ssl_payload}
+
+
def get_res_id(idrac, certype):
    """Return the Redfish resource ID used for the certificate operation.

    Queries the managers collection (certificate-type specific when mapped)
    and returns the ID of its first member; falls back to the well-known
    embedded iDRAC manager ID when the lookup fails for any reason.

    :param idrac: open iDRACRedfishAPI session.
    :param certype: Redfish certificate type, e.g. 'Server'.
    :returns: manager resource ID string, e.g. 'iDRAC.Embedded.1'.
    """
    cert_map = {"Server": MANAGER_ID}
    try:
        # iDRACRedfishAPI.invoke_request takes (uri, method) — the original
        # call passed them swapped, so this lookup always fell through to the
        # except-branch default.
        resp = idrac.invoke_request(cert_map.get(certype, MANAGERS_URI), "GET")
        membs = resp.json_data.get("Members")
        res_uri = membs[0].get('@odata.id')  # first member of the collection
        res_id = res_uri.split("/")[-1]
    except Exception:
        # Best-effort lookup: default to the embedded iDRAC manager ID.
        res_id = cert_map.get(certype, MANAGER_ID)
    return res_id
+
+
def get_idrac_service(idrac, res_id):
    """Return the DelliDRACCardService URI for the given manager resource.

    Reads the manager resource and follows the OEM Dell link; falls back to
    the static IDRAC_SERVICE URI when the link is absent or the request fails.

    :param idrac: open iDRACRedfishAPI session.
    :param res_id: manager resource ID, e.g. 'iDRAC.Embedded.1'.
    :returns: service URI string.
    """
    try:
        # iDRACRedfishAPI.invoke_request takes (uri, method) — the original
        # call passed them swapped, which made this lookup always fail over
        # to the static fallback URI.
        resp = idrac.invoke_request("{0}/{1}".format(MANAGERS_URI, res_id), 'GET')
        dell_srvc = resp.json_data['Links']['Oem']['Dell']['DelliDRACCardService']
        srvc = dell_srvc.get("@odata.id", IDRAC_SERVICE.format(res_id=res_id))
    except Exception:
        srvc = IDRAC_SERVICE.format(res_id=res_id)
    return srvc
+
+
def get_actions_map(idrac, idrac_service_uri):
    """Map iDRAC card service action names to their target URIs.

    Reads the DelliDRACCardService resource and extracts each action's
    'target' URI. Falls back to the static idrac_service_actions table when
    the service cannot be read (e.g. older firmware or transient errors).

    :param idrac: open iDRACRedfishAPI session.
    :param idrac_service_uri: URI of the DelliDRACCardService resource.
    :returns: dict mapping action names (e.g.
        '#DelliDRACCardService.ImportSSLCertificate') to URIs.
    """
    actions = idrac_service_actions
    try:
        resp = idrac.invoke_request(idrac_service_uri, 'GET')
        srvc_data = resp.json_data
        actions = dict((k, v.get('target')) for k, v in srvc_data.get('Actions').items())
    except Exception as exc:
        # Best-effort: any failure falls back to the hard-coded action table.
        actions = idrac_service_actions
    return actions
+
+
def get_cert_url(actions, op, certype, res_id):
    """Resolve the action URI for the requested operation and certificate type.

    Prefers the URI advertised by the service's actions map and falls back to
    the static default table; returns None when neither defines one.
    """
    action_key = action_url_map.get(op).get(certype)
    url_template = actions.get(action_key) or dflt_url_map.get(op).get(certype)
    return url_template.format(res_id=res_id) if url_template else url_template
+
+
def certificate_action(module, idrac, actions, op, certype, res_id):
    """Build the payload for the requested operation and run it on the iDRAC.

    Exits the module when no action URI can be resolved, i.e. the operation is
    not supported for the requested certificate type.
    """
    target_url = get_cert_url(actions, op, certype, res_id)
    if not target_url:
        unsupported_msg = NOT_SUPPORTED_ACTION.format(op=op, certype=module.params.get('certificate_type'))
        module.exit_json(msg=unsupported_msg)
    payload, http_method = payload_map.get(certype)(module, op, certype)
    exit_certificates(module, idrac, target_url, payload, http_method, certype, res_id)
+
+
def write_to_file(module, cert_data, dkey):
    """Write the certificate/CSR payload cert_data[dkey] to a file on disk.

    The file is created under the user-supplied certificate_path directory
    with a timestamped name, and the generated path replaces the payload in
    cert_data under the mapped output key.

    :param module: AnsibleModule instance (exits on an invalid directory).
    :param cert_data: response dict; dkey is popped and the file path stored.
    :param dkey: payload key, 'CSRString' or 'CertificateFile'.
    """
    f_ext = {'HTTPS': ".pem", 'CA': ".pem", 'CSC': ".crt", 'CLIENT_TRUST_CERTIFICATE': ".crt"}
    path = module.params.get('certificate_path')
    # isdir() implies existence, so a single check is enough. The previous
    # "exists or isdir" test also accepted an existing regular file, which
    # later crashed in open() with an unhandled NotADirectoryError.
    if not os.path.isdir(path):
        module.exit_json(msg="Provided directory path '{0}' is not valid.".format(path), failed=True)
    if not os.access(path, os.W_OK):
        module.exit_json(msg="Provided directory path '{0}' is not writable. Please check if you "
                             "have appropriate permissions.".format(path), failed=True)
    d = datetime.now()
    if module.params.get('command') == 'generate_csr':
        ext = '.txt'  # CSR is plain text regardless of certificate type
    else:
        ext = f_ext.get(module.params.get('certificate_type'))
    # <ip>_<yyyyMd>_<HMS>_<type><ext>; date/time fields are not zero-padded.
    cert_file_name = "{0}_{1}{2}{3}_{4}{5}{6}_{7}{8}".format(
        module.params["idrac_ip"], d.date().year, d.date().month, d.date().day,
        d.time().hour, d.time().minute, d.time().second, module.params.get('certificate_type'), ext)
    file_name = os.path.join(path, cert_file_name)
    write_data = cert_data.pop(dkey, None)
    with open(file_name, "w") as fp:
        fp.writelines(write_data)
    cert_data[out_file_path.get(dkey)] = file_name
+
+
def format_output(module, cert_data):
    """Normalise the iDRAC response dict for module output.

    Drops message/noise keys, renames keys via out_mapper, and writes any
    certificate/CSR payload to disk (replacing it with the generated file
    path). The dict is modified in place and also returned.
    """
    # Iterate over a snapshot of the keys because the dict is mutated below.
    for key in list(cert_data):
        if "message" in key.lower():
            cert_data.pop(key, None)
        if key in out_mapper:  # out_mapper is currently empty; kept for future renames
            cert_data[out_mapper.get(key)] = cert_data.pop(key, None)
        if key in out_file_path:
            write_to_file(module, cert_data, key)
    # Removed the dead 'result' accumulator from the original: it was never
    # populated, so its conditional merge could not execute.
    cert_data.pop("CertificateCollection", None)
    return cert_data
+
+
def get_export_data(idrac, certype, res_id):
    """Return the currently-installed certificate text, or '' on any failure.

    Used for the 'import' idempotence check: the candidate certificate is
    compared against what the iDRAC already serves.
    """
    cert_file = ""
    try:
        resp = idrac.invoke_request(EXPORT_SSL.format(res_id=res_id), "POST", data={"SSLCertType": certype})
        cert_file = resp.json_data.get("CertificateFile")
    except Exception:
        cert_file = ""
    return cert_file
+
+
def exit_certificates(module, idrac, cert_url, cert_payload, method, certype, res_id):
    """Run the certificate request, optionally reset the iDRAC, and exit.

    Handles 'import' idempotence (no-op when the certificate is already
    installed), check mode, output formatting, and the optional iDRAC reset
    that activates a newly imported or reset certificate. Always terminates
    the module via exit_json.
    """
    cmd = module.params.get('command')
    changed = changed_map.get(cmd)
    # Only state-changing commands ('import'/'reset') honour the reset option.
    reset = changed_map.get(cmd) and module.params.get('reset')
    result = {"changed": changed}
    reset_msg = ""
    if changed:
        reset_msg = " Reset iDRAC to apply new certificate." \
                    " Until iDRAC is reset, the old certificate will be active."
    if module.params.get('command') == 'import':
        # Idempotence: skip the import when the submitted certificate text is
        # already contained in the currently active certificate.
        export_cert = get_export_data(idrac, certype, res_id)
        if cert_payload.get('SSLCertificateFile') in export_cert:
            module.exit_json(msg=NO_CHANGES_MSG)
    # Check mode only short-circuits for state-changing commands; export and
    # generate_csr proceed (NOTE(review): generate_csr still writes a file).
    if module.check_mode and changed:
        module.exit_json(msg=CHANGES_MSG, changed=changed)
    if module.params.get('command') == 'reset' and certype == "Server":
        # SSLResetCfg takes a literal "{}" body; dump=False avoids
        # re-serialising the already-encoded payload string.
        resp = idrac.invoke_request(cert_url, method, data=cert_payload, dump=False)
    else:
        resp = idrac.invoke_request(cert_url, method, data=cert_payload)
    cert_data = resp.json_data
    cert_output = format_output(module, cert_data)
    result.update(cert_output)
    if reset:
        reset, track_failed, reset_msg = reset_idrac(idrac, module.params.get('wait'), res_id)
    result['msg'] = "{0}{1}".format(SUCCESS_MSG.format(command=cmd), reset_msg)
    module.exit_json(**result)
+
+
def main():
    """Entry point: validate arguments and run the certificate operation."""
    specs = {
        "command": {"type": 'str', "default": 'generate_csr',
                    "choices": ['generate_csr', 'export', 'import', 'reset']},
        "certificate_type": {"type": 'str', "default": 'HTTPS',
                             "choices": ['HTTPS', 'CA', 'CSC', 'CLIENT_TRUST_CERTIFICATE']},
        "certificate_path": {"type": 'path'},
        "passphrase": {"type": 'str', "no_log": True},
        "cert_params": {"type": 'dict', "options": {
            "common_name": {"type": 'str', "required": True},
            "organization_unit": {"type": 'str', "required": True},
            "locality_name": {"type": 'str', "required": True},
            "state_name": {"type": 'str', "required": True},
            "country_code": {"type": 'str', "required": True},
            "email_address": {"type": 'str', "required": True},
            "organization_name": {"type": 'str', "required": True},
            "subject_alt_name": {"type": 'list', "elements": 'str', "default": []}
        }},
        "resource_id": {"type": 'str'},
        "reset": {"type": 'bool', "default": True},
        "wait": {"type": 'int', "default": 300}
    }
    specs.update(idrac_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[
            ['command', 'generate_csr', ('cert_params', 'certificate_path',)],
            ['command', 'import', ('certificate_path',)],
            ['command', 'export', ('certificate_path',)]
        ],
        supports_check_mode=True)

    try:
        with iDRACRedfishAPI(module.params) as idrac:
            certype = certype_map.get(module.params.get('certificate_type'))
            op = module.params.get('command')
            res_id = module.params.get('resource_id')
            if not res_id:
                # No explicit resource ID: discover it from the iDRAC.
                res_id = get_res_id(idrac, certype)
            idrac_service_uri = get_idrac_service(idrac, res_id)
            actions_map = get_actions_map(idrac, idrac_service_uri)
            certificate_action(module, idrac, actions_map, op, certype, res_id)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # An unreachable host is reported as unreachable, not failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (ImportError, ValueError, RuntimeError, SSLValidationError,
            ConnectionError, KeyError, TypeError, IndexError) as e:
        module.fail_json(msg=str(e))
+
+
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py
new file mode 100644
index 000000000..e4d966345
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py
@@ -0,0 +1,651 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.1.0
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: idrac_firmware
+short_description: Firmware update from a repository on a network share (CIFS, NFS, HTTP, HTTPS, FTP)
+version_added: "2.1.0"
+description:
+ - Update the Firmware by connecting to a network share (CIFS, NFS, HTTP, HTTPS, FTP) that contains a catalog of
+ available updates.
+ - Network share should contain a valid repository of Update Packages (DUPs) and a catalog file describing the DUPs.
+ - All applicable updates contained in the repository are applied to the system.
+ - This feature is available only with iDRAC Enterprise License.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ share_name:
+ description: Network share path of update repository. CIFS, NFS, HTTP, HTTPS and FTP share types are supported.
+ type: str
+ required: True
+ share_user:
+ description: Network share user in the format 'user@domain' or 'domain\\user' if user is
+ part of a domain else 'user'. This option is mandatory for CIFS Network Share.
+ type: str
+ share_password:
+ description: Network share user password. This option is mandatory for CIFS Network Share.
+ type: str
+ aliases: ['share_pwd']
+ share_mnt:
+ description:
+ - Local mount path of the network share with read-write permission for ansible user.
+ - This option is not applicable for HTTP, HTTPS, and FTP shares.
+ type: str
+ job_wait:
+ description: Whether to wait for job completion or not.
+ type: bool
+ default: True
+ catalog_file_name:
+ description: Catalog file name relative to the I(share_name).
+ type: str
+ default: 'Catalog.xml'
+ ignore_cert_warning:
+ description: Specifies if certificate warnings are ignored when HTTPS share is used.
+ If C(True) option is set, then the certificate warnings are ignored.
+ type: bool
+ default: True
+ apply_update:
+ description:
+ - If I(apply_update) is set to C(True), then the packages are applied.
+ - If I(apply_update) is set to C(False), no updates are applied, and a catalog report
+ of packages is generated and returned.
+ type: bool
+ default: True
+ reboot:
+ description:
+ - Provides the option to apply the update packages immediately or in the next reboot.
+ - If I(reboot) is set to C(True), then the packages are applied immediately.
+ - If I(reboot) is set to C(False), then the packages are staged and applied in the next reboot.
+ - Packages that do not require a reboot are applied immediately irrespective of I(reboot).
+ type: bool
+ default: False
+
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author:
+ - "Rajeev Arakkal (@rajeevarakkal)"
+ - "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Module will report success based on the iDRAC firmware update parent job status if there are no individual
+ component jobs present.
+ - For server with iDRAC firmware 5.00.00.00 and later, if the repository contains unsupported packages, then the
+ module will return success with a proper message.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = """
+---
+- name: Update firmware from repository on a NFS Share
+ dellemc.openmanage.idrac_firmware:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "192.168.0.0:/share"
+ reboot: True
+ job_wait: True
+ apply_update: True
+ catalog_file_name: "Catalog.xml"
+
+- name: Update firmware from repository on a CIFS Share
+ dellemc.openmanage.idrac_firmware:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "full_cifs_path"
+ share_user: "share_user"
+ share_password: "share_password"
+ reboot: True
+ job_wait: True
+ apply_update: True
+ catalog_file_name: "Catalog.xml"
+
+- name: Update firmware from repository on a HTTP
+ dellemc.openmanage.idrac_firmware:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "http://downloads.dell.com"
+ reboot: True
+ job_wait: True
+ apply_update: True
+
+- name: Update firmware from repository on a HTTPS
+ dellemc.openmanage.idrac_firmware:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "https://downloads.dell.com"
+ reboot: True
+ job_wait: True
+ apply_update: True
+
+- name: Update firmware from repository on a FTP
+ dellemc.openmanage.idrac_firmware:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "ftp://ftp.dell.com"
+ reboot: True
+ job_wait: True
+ apply_update: True
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall firmware update status.
+ returned: always
+ sample: "Successfully updated the firmware."
+update_status:
+ type: dict
+ description: Firmware Update job and progress details from the iDRAC.
+ returned: success
+ sample: {
+ 'InstanceID': 'JID_XXXXXXXXXXXX',
+ 'JobState': 'Completed',
+ 'Message': 'Job completed successfully.',
+ 'MessageId': 'REDXXX',
+ 'Name': 'Repository Update',
+ 'JobStartTime': 'NA',
+ 'Status': 'Success',
+ }
+"""
+
+
+import os
+import json
+import time
+from ssl import SSLError
+from xml.etree import ElementTree as ET
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+try:
+ from omsdk.sdkcreds import UserCredentials
+ from omsdk.sdkfile import FileOnShare
+ from omsdk.http.sdkwsmanbase import WsManProtocolBase
+ HAS_OMSDK = True
+except ImportError:
+ HAS_OMSDK = False
+
+SHARE_TYPE = {'nfs': 'NFS', 'cifs': 'CIFS', 'ftp': 'FTP',
+ 'http': 'HTTP', 'https': 'HTTPS', 'tftp': 'TFTP'}
+CERT_WARN = {True: 'On', False: 'Off'}
+IDRAC_PATH = "/redfish/v1/Dell/Systems/System.Embedded.1/DellSoftwareInstallationService"
+PATH = "/redfish/v1/Dell/Systems/System.Embedded.1/DellSoftwareInstallationService/Actions/" \
+ "DellSoftwareInstallationService.InstallFromRepository"
+GET_REPO_BASED_UPDATE_LIST_PATH = "/redfish/v1/Dell/Systems/System.Embedded.1/DellSoftwareInstallationService/" \
+ "Actions/DellSoftwareInstallationService.GetRepoBasedUpdateList"
+JOB_URI = "/redfish/v1/JobService/Jobs/{job_id}"
+iDRAC_JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{job_id}"
+MESSAGE = "Firmware versions on server match catalog, applicable updates are not present in the repository."
+EXIT_MESSAGE = "The catalog in the repository specified in the operation has the same firmware versions " \
+ "as currently present on the server."
+IDEM_MSG_ID = "SUP029"
+REDFISH_VERSION = "3.30"
+INTERVAL = 30 # polling interval
+WAIT_COUNT = 240
+JOB_WAIT_MSG = 'Job wait timed out after {0} minutes'
+
+
def wait_for_job_completion(module, job_uri, job_wait=False, reboot=False, apply_update=False):
    """Poll the firmware-update job and return (response, error message).

    Phase 1 retries the initial GET up to 5 times (10 s apart) so transient
    connection errors do not abort the module. Phase 2 (only when job_wait is
    True) polls every INTERVAL seconds until the job completes, or — when the
    update is staged (apply_update without reboot) — until it is scheduled,
    or until WAIT_COUNT iterations elapse.

    :returns: tuple of (last response object or {}, error/timeout message or
        None on success).
    """
    track_counter = 0
    response = {}
    msg = None
    while track_counter < 5:
        try:
            # For job_wait False return a valid response, try 5 times
            with iDRACRedfishAPI(module.params) as redfish:
                response = redfish.invoke_request(job_uri, "GET")
            track_counter += 5  # success: force the retry loop to exit
            msg = None
        except Exception as error_message:
            msg = str(error_message)
            track_counter += 1
            time.sleep(10)
    if track_counter < 5:
        msg = None
    # reset track counter
    track_counter = 0
    while job_wait and track_counter <= WAIT_COUNT:
        try:
            with iDRACRedfishAPI(module.params) as redfish:
                response = redfish.invoke_request(job_uri, "GET")
                job_state = response.json_data.get("JobState")
                msg = None
        except Exception as error_message:
            # Polling error: penalise the counter double and retry.
            msg = str(error_message)
            track_counter += 2
            time.sleep(INTERVAL)
        else:
            # try/except/else: runs only when the poll above succeeded.
            if response.json_data.get("PercentComplete") == 100 and job_state == "Completed":  # apply now
                break
            if job_state in ["Starting", "Running", "Pending", "New"] and not reboot and apply_update:  # apply on
                break
            track_counter += 1
            time.sleep(INTERVAL)
    if track_counter > WAIT_COUNT:
        # TIMED OUT
        msg = JOB_WAIT_MSG.format((WAIT_COUNT * INTERVAL) / 60)
    return response, msg
+
+
+def _validate_catalog_file(catalog_file_name):
+ normilized_file_name = catalog_file_name.lower()
+ if not normilized_file_name:
+ raise ValueError('catalog_file_name should be a non-empty string.')
+ elif not normilized_file_name.endswith("xml"):
+ raise ValueError('catalog_file_name should be an XML file.')
+
+
def get_check_mode_status(status, module):
    """Exit early when the repository holds no applicable updates.

    In check mode reports that there is nothing to commit; otherwise reports
    that the catalog matches the installed firmware. Returns None (without
    exiting) when updates are applicable.
    """
    repo_list_msg = status['job_details']["Data"]["GetRepoBasedUpdateList_OUTPUT"].get("Message")
    nothing_to_apply = repo_list_msg == MESSAGE.rstrip(".") and status.get('JobStatus') == "Completed"
    if nothing_to_apply:
        if module.check_mode:
            module.exit_json(msg="No changes found to commit!")
        module.exit_json(msg=EXIT_MESSAGE)
+
+
def get_job_status(module, each_comp, idrac):
    """Resolve the final status of one component-update job.

    Uses the omsdk job manager when an iDRAC connection is supplied, otherwise
    polls over Redfish. Mutates each_comp in place, setting 'JobStatus' and
    'Message'.

    :param each_comp: component dict parsed from the PackageList XML; its
        'JobID' (may be None) selects the job to track.
    :returns: tuple of (each_comp, failed flag).
    """
    failed, each_comp['JobStatus'], each_comp['Message'] = False, None, None
    job_wait = module.params['job_wait']
    reboot = module.params['reboot']
    apply_update = module.params['apply_update']
    if each_comp.get("JobID") is not None:
        if idrac:
            # omsdk path: block until the job leaves the 'Scheduled' state
            # when the update is applied immediately (reboot + apply_update).
            resp = idrac.job_mgr.job_wait(each_comp.get("JobID"))
            while reboot and apply_update:
                resp = idrac.job_mgr.job_wait(each_comp.get("JobID"))
                if resp.get("JobStatus") is not None and (not resp.get('JobStatus') == "Scheduled"):
                    break
            each_comp['Message'] = resp.get('Message')
            each_comp['JobStatus'] = "OK"
            # Heuristic: classify the job by failure keywords in its message.
            fail_words_lower = ['fail', 'invalid', 'unable', 'not', 'cancel']
            if any(x in resp.get('Message').lower() for x in fail_words_lower):
                each_comp['JobStatus'] = "Critical"
                failed = True
        else:
            # Redfish path: poll the job resource directly.
            resp, msg = wait_for_job_completion(module, JOB_URI.format(job_id=each_comp.get("JobID")), job_wait, reboot,
                                                apply_update)
            if not msg:
                resp_data = resp.json_data
                if resp_data.get('Messages'):
                    each_comp['Message'] = resp_data.get('Messages')[0]['Message']
                each_comp['JobStatus'] = resp_data.get('JobStatus')
                if each_comp['JobStatus'] == "Critical":
                    failed = True
            else:
                # Polling itself failed (connection error or timeout).
                failed = True
    return each_comp, failed
+
+
def _convert_xmltojson(module, job_details, idrac):
    """get all the xml data from PackageList and returns as valid json.

    Parses the WSMAN-style PackageList XML, resolves each component's job
    status, and returns (components, repo_parsed_ok, any_component_failed).
    On a parse error the raw PackageList string is returned unchanged.
    """
    data, repo_status, failed_status = [], False, False
    try:
        xmldata = ET.fromstring(job_details['PackageList'])
        for iname in xmldata.iter('INSTANCENAME'):
            # Flatten each PROPERTY element into a {NAME: text} dict.
            comp_data = dict([(attr.attrib['NAME'], txt.text) for attr in iname.iter("PROPERTY") for txt in attr])
            component, failed = get_job_status(module, comp_data, idrac)
            # get the any single component update failure and record the only very first failure on failed_status True
            if not failed_status and failed:
                failed_status = True
            data.append(component)
        repo_status = True
    except ET.ParseError:
        # PackageList was not XML; pass it through untouched.
        data = job_details['PackageList']
    return data, repo_status, failed_status
+
+
def get_jobid(module, resp):
    """Return the job ID parsed from a 202 response's Location header.

    Fails the module when the request was not accepted (non-202 status) or
    when the Location header is missing.
    """
    if resp.status_code != 202:
        module.fail_json(msg="Failed to update firmware.")
        return None
    location = resp.headers.get('Location')
    if location is None:
        module.fail_json(msg="Failed to update firmware.")
    return location.split("/")[-1]
+
+
def handle_HTTP_error(module, httperr):
    """Translate an HTTPError from the repository-update call into module exit.

    Exits successfully when the error carries the 'firmware already up to
    date' message ID; otherwise fails the module with the parsed error body.
    """
    err_message = json.load(httperr)
    extended_info = err_message.get('error', {}).get('@Message.ExtendedInfo', [{"Message": EXIT_MESSAGE}])
    if extended_info:
        first_entry = extended_info[0]
        if IDEM_MSG_ID in first_entry.get('MessageId'):
            module.exit_json(msg=first_entry.get("Message", EXIT_MESSAGE))
    if "error" in err_message:
        module.fail_json(msg=err_message)
+
+
+def update_firmware_url_redfish(module, idrac, share_name, apply_update, reboot, job_wait, payload, repo_urls):
+    """Update firmware through HTTP/HTTPS/FTP and return the job details.
+
+    Fills the repository payload from the parsed share URL, triggers the
+    install through the DellSoftwareInstallationService, waits for the job,
+    then queries the repo-based update list.  Returns (status, job_details).
+    """
+    repo_url = urlparse(share_name)
+    job_details, status = {}, {}
+    ipaddr = repo_url.netloc
+    share_type = repo_url.scheme
+    sharename = repo_url.path.strip('/')
+    payload['IPAddress'] = ipaddr
+    if repo_url.path:
+        payload['ShareName'] = sharename
+    payload['ShareType'] = SHARE_TYPE[share_type]
+    install_url = PATH
+    get_repo_url = GET_REPO_BASED_UPDATE_LIST_PATH
+    actions = repo_urls.get('Actions')
+    if actions:
+        # Prefer the action targets advertised by the service; fall back to the static URIs.
+        install_url = actions.get("#DellSoftwareInstallationService.InstallFromRepository", {}).get("target", PATH)
+        get_repo_url = actions.get("#DellSoftwareInstallationService.GetRepoBasedUpdateList", {}).\
+            get("target", GET_REPO_BASED_UPDATE_LIST_PATH)
+    resp = idrac.invoke_request(install_url, method="POST", data=payload)
+    job_id = get_jobid(module, resp)
+    resp, msg = wait_for_job_completion(module, JOB_URI.format(job_id=job_id), job_wait, reboot, apply_update)
+    # An empty msg means the wait succeeded and resp carries the job data.
+    if not msg:
+        status = resp.json_data
+    else:
+        status['update_msg'] = msg
+    try:
+        resp_repo_based_update_list = idrac.invoke_request(get_repo_url, method="POST", data="{}",
+                                                           dump=False)
+        job_details = resp_repo_based_update_list.json_data
+    except HTTPError as err:
+        # handle_HTTP_error may exit the module; re-raise for the caller otherwise.
+        handle_HTTP_error(module, err)
+        raise err
+    return status, job_details
+
+
+def update_firmware_url_omsdk(module, idrac, share_name, catalog_file_name, apply_update, reboot,
+                              ignore_cert_warning, job_wait, payload):
+    """Update firmware through HTTP/HTTPS/FTP and return the job details.
+
+    Uses the dedicated Dell-repository OMSDK call when the share host is
+    downloads.dell.com, else the generic repo-URL call.  Returns
+    (status, job_details); job_details stays empty on this OMSDK path because
+    the status dict already embeds the job information.
+    """
+    # NOTE(review): 'payload' is accepted but never used here -- kept for signature
+    # parity with update_firmware_url_redfish; confirm before removing.
+    repo_url = urlparse(share_name)
+    job_details, status = {}, {}
+    ipaddr = repo_url.netloc
+    share_type = repo_url.scheme
+    sharename = repo_url.path.strip('/')
+    if ipaddr == "downloads.dell.com":
+        status = idrac.update_mgr.update_from_dell_repo_url(ipaddress=ipaddr, share_type=share_type,
+                                                            share_name=sharename, catalog_file=catalog_file_name,
+                                                            apply_update=apply_update, reboot_needed=reboot,
+                                                            ignore_cert_warning=ignore_cert_warning, job_wait=job_wait)
+        get_check_mode_status(status, module)
+    else:
+        status = idrac.update_mgr.update_from_repo_url(ipaddress=ipaddr, share_type=share_type,
+                                                       share_name=sharename, catalog_file=catalog_file_name,
+                                                       apply_update=apply_update, reboot_needed=reboot,
+                                                       ignore_cert_warning=ignore_cert_warning, job_wait=job_wait)
+        get_check_mode_status(status, module)
+    return status, job_details
+
+
+def update_firmware_omsdk(idrac, module):
+    """Update firmware from a network share and return the job details.
+
+    OMSDK code path: triggers a repository-based update (HTTP/HTTPS/FTP via
+    update_firmware_url_omsdk, otherwise a CIFS/NFS FileOnShare mount),
+    normalises the PackageList into JSON, derives the user-facing message
+    from the apply_update/reboot/job_wait flags, and handles check mode.
+    Returns a dict with changed/failed/update_status/update_msg keys.
+    """
+    msg = {}
+    msg['changed'], msg['failed'], msg['update_status'] = False, False, {}
+    msg['update_msg'] = "Successfully triggered the job to update the firmware."
+    try:
+        share_name = module.params['share_name']
+        catalog_file_name = module.params['catalog_file_name']
+        share_user = module.params['share_user']
+        share_pwd = module.params['share_password']
+        reboot = module.params['reboot']
+        job_wait = module.params['job_wait']
+        ignore_cert_warning = module.params['ignore_cert_warning']
+        apply_update = module.params['apply_update']
+        payload = {"RebootNeeded": reboot, "CatalogFile": catalog_file_name, "ApplyUpdate": str(apply_update),
+                   "IgnoreCertWarning": CERT_WARN[ignore_cert_warning]}
+        if share_user is not None:
+            payload['UserName'] = share_user
+        if share_pwd is not None:
+            payload['Password'] = share_pwd
+
+        # URL-based shares go through the repo-URL API; anything else is mounted as a file share.
+        if share_name.lower().startswith(('http://', 'https://', 'ftp://')):
+            msg['update_status'], job_details = update_firmware_url_omsdk(module, idrac, share_name, catalog_file_name,
+                                                                          apply_update, reboot, ignore_cert_warning,
+                                                                          job_wait, payload)
+            if job_details:
+                msg['update_status']['job_details'] = job_details
+        else:
+            upd_share = FileOnShare(remote="{0}{1}{2}".format(share_name, os.sep, catalog_file_name),
+                                    mount_point=module.params['share_mnt'], isFolder=False,
+                                    creds=UserCredentials(share_user, share_pwd))
+            msg['update_status'] = idrac.update_mgr.update_from_repo(upd_share, apply_update=apply_update,
+                                                                     reboot_needed=reboot, job_wait=job_wait)
+            get_check_mode_status(msg['update_status'], module)
+
+        # Normalise the package list: OMSDK returns XML (converted here), Redfish returns JSON.
+        json_data, repo_status, failed = msg['update_status']['job_details'], False, False
+        if "PackageList" not in json_data:
+            job_data = json_data.get('Data')
+            pkglst = job_data['body'] if 'body' in job_data else job_data.get('GetRepoBasedUpdateList_OUTPUT')
+            if 'PackageList' in pkglst:  # Returns from OMSDK
+                pkglst['PackageList'], repo_status, failed = _convert_xmltojson(module, pkglst, idrac)
+        else:  # Redfish
+            json_data['PackageList'], repo_status, failed = _convert_xmltojson(module, json_data, None)
+
+        # Derive the user-facing message from the apply/reboot/wait combination and failure state.
+        if not apply_update and not failed:
+            msg['update_msg'] = "Successfully fetched the applicable firmware update package list."
+        elif apply_update and not reboot and not job_wait and not failed:
+            msg['update_msg'] = "Successfully triggered the job to stage the firmware."
+        elif apply_update and job_wait and not reboot and not failed:
+            msg['update_msg'] = "Successfully staged the applicable firmware update packages."
+            msg['changed'] = True
+        elif apply_update and job_wait and not reboot and failed:
+            msg['update_msg'] = "Successfully staged the applicable firmware update packages with error(s)."
+            msg['failed'] = True
+
+    except RuntimeError as e:
+        module.fail_json(msg=str(e))
+
+    # fail_json above exits the module, so json_data is always bound past this point.
+    if module.check_mode and not (json_data.get('PackageList') or json_data.get('Data')) and \
+            msg['update_status']['JobStatus'] == 'Completed':
+        module.exit_json(msg="No changes found to commit!")
+    elif module.check_mode and (json_data.get('PackageList') or json_data.get('Data')) and \
+            msg['update_status']['JobStatus'] == 'Completed':
+        module.exit_json(msg="Changes found to commit!", changed=True,
+                         update_status=msg['update_status'])
+    elif module.check_mode and not msg['update_status']['JobStatus'] == 'Completed':
+        msg['update_status'].pop('job_details')
+        module.fail_json(msg="Unable to complete the firmware repository download.",
+                         update_status=msg['update_status'])
+    elif not module.check_mode and "Status" in msg['update_status']:
+        if msg['update_status']['Status'] in ["Success", "InProgress"]:
+            # A full update (apply + reboot + wait) flips 'changed' only when the repo list parsed cleanly.
+            if module.params['job_wait'] and module.params['apply_update'] and module.params['reboot'] and (
+                    'job_details' in msg['update_status'] and repo_status) and not failed:
+                msg['changed'] = True
+                msg['update_msg'] = "Successfully updated the firmware."
+            elif module.params['job_wait'] and module.params['apply_update'] and module.params['reboot'] and (
+                    'job_details' in msg['update_status'] and repo_status) and failed:
+                msg['failed'], msg['changed'] = True, False
+                msg['update_msg'] = "Firmware update failed."
+        else:
+            failed_msg = "Firmware update failed."
+            if not apply_update:
+                failed_msg = "Unable to complete the repository update."
+            module.fail_json(msg=failed_msg, update_status=msg['update_status'])
+    return msg
+
+
+def update_firmware_redfish(idrac, module, repo_urls):
+    """Update firmware from a network share and return the job details.
+
+    Redfish code path: triggers a repository-based update (HTTP/HTTPS/FTP via
+    update_firmware_url_redfish, otherwise a CIFS/NFS payload posted to PATH),
+    normalises the PackageList into JSON, derives the user-facing message
+    from the apply_update/reboot/job_wait flags, and handles check mode.
+    Returns a dict with changed/failed/update_status/update_msg keys.
+    """
+    msg = {}
+    msg['changed'], msg['failed'] = False, False
+    msg['update_msg'] = "Successfully triggered the job to update the firmware."
+    try:
+        share_name = module.params['share_name']
+        catalog_file_name = module.params['catalog_file_name']
+        share_user = module.params['share_user']
+        share_pwd = module.params['share_password']
+        reboot = module.params['reboot']
+        job_wait = module.params['job_wait']
+        ignore_cert_warning = module.params['ignore_cert_warning']
+        apply_update = module.params['apply_update']
+        payload = {"RebootNeeded": reboot, "CatalogFile": catalog_file_name, "ApplyUpdate": str(apply_update),
+                   "IgnoreCertWarning": CERT_WARN[ignore_cert_warning]}
+        if share_user is not None:
+            payload['UserName'] = share_user
+        if share_pwd is not None:
+            payload['Password'] = share_pwd
+
+        if share_name.lower().startswith(('http://', 'https://', 'ftp://')):
+            msg['update_status'], job_details = update_firmware_url_redfish(
+                module, idrac, share_name, apply_update, reboot, job_wait, payload, repo_urls)
+            if job_details:
+                msg['update_status']['job_details'] = job_details
+        else:
+            # UNC paths (\\host\share) are CIFS; everything else is treated as host:/path NFS.
+            if share_name.startswith('\\\\'):
+                cifs = share_name.split('\\')
+                payload['IPAddress'] = cifs[2]
+                payload['ShareName'] = '\\'.join(cifs[3:])
+                payload['ShareType'] = 'CIFS'
+            else:
+                nfs = urlparse(share_name)
+                # NOTE(review): for "host:/path" urlparse puts the host in .scheme -- verify intent.
+                payload['IPAddress'] = nfs.scheme
+                payload['ShareName'] = nfs.path.strip('/')
+                payload['ShareType'] = 'NFS'
+            resp = idrac.invoke_request(PATH, method="POST", data=payload)
+            job_id = get_jobid(module, resp)
+            resp, mesg = wait_for_job_completion(module, JOB_URI.format(job_id=job_id), job_wait, reboot, apply_update)
+            if not mesg:
+                msg['update_status'] = resp.json_data
+            else:
+                msg['update_status'] = mesg
+            try:
+                repo_based_update_list = idrac.invoke_request(GET_REPO_BASED_UPDATE_LIST_PATH, method="POST",
+                                                              data="{}", dump=False)
+                msg['update_status']['job_details'] = repo_based_update_list.json_data
+            except HTTPError as err:
+                handle_HTTP_error(module, err)
+                raise err
+        # Normalise the package list: OMSDK-shaped payloads carry XML, Redfish carries JSON.
+        json_data, repo_status, failed = msg['update_status']['job_details'], False, False
+        if "PackageList" not in json_data:
+            job_data = json_data.get('Data')
+            pkglst = job_data['body'] if 'body' in job_data else job_data.get('GetRepoBasedUpdateList_OUTPUT')
+            if 'PackageList' in pkglst:
+                pkglst['PackageList'], repo_status, failed = _convert_xmltojson(module, pkglst, idrac)
+        else:
+            json_data['PackageList'], repo_status, failed = _convert_xmltojson(module, json_data, None)
+
+        # Derive the user-facing message from the apply/reboot/wait combination and failure state.
+        if not apply_update and not failed:
+            msg['update_msg'] = "Successfully fetched the applicable firmware update package list."
+        elif apply_update and not reboot and not job_wait and not failed:
+            msg['update_msg'] = "Successfully triggered the job to stage the firmware."
+        elif apply_update and job_wait and not reboot and not failed:
+            msg['update_msg'] = "Successfully staged the applicable firmware update packages."
+            msg['changed'] = True
+        elif apply_update and job_wait and not reboot and failed:
+            msg['update_msg'] = "Successfully staged the applicable firmware update packages with error(s)."
+            msg['failed'] = True
+
+    except RuntimeError as e:
+        module.fail_json(msg=str(e))
+
+    # fail_json above exits the module, so json_data is always bound past this point.
+    if module.check_mode and not (json_data.get('PackageList') or json_data.get('Data')) and \
+            msg['update_status']['JobStatus'] == 'OK':
+        module.exit_json(msg="No changes found to commit!")
+    elif module.check_mode and (json_data.get('PackageList') or json_data.get('Data')) and \
+            msg['update_status']['JobStatus'] == 'OK':
+        module.exit_json(msg="Changes found to commit!", changed=True,
+                         update_status=msg['update_status'])
+    elif module.check_mode and not msg['update_status']['JobStatus'] == 'OK':
+        msg['update_status'].pop('job_details')
+        module.fail_json(msg="Unable to complete the firmware repository download.",
+                         update_status=msg['update_status'])
+    elif not module.check_mode and "JobStatus" in msg['update_status']:
+        if not msg['update_status']['JobStatus'] == "Critical":
+            # A full update (apply + reboot + wait) flips 'changed' only when the repo list parsed cleanly.
+            if module.params['job_wait'] and module.params['apply_update'] and module.params['reboot'] and \
+                    ('job_details' in msg['update_status'] and repo_status) and not failed:
+                msg['changed'] = True
+                msg['update_msg'] = "Successfully updated the firmware."
+            elif module.params['job_wait'] and module.params['apply_update'] and module.params['reboot'] and \
+                    ('job_details' in msg['update_status'] and repo_status) and failed:
+                msg['failed'], msg['changed'] = True, False
+                msg['update_msg'] = "Firmware update failed."
+        else:
+            failed_msg = "Firmware update failed."
+            if not apply_update:
+                failed_msg = "Unable to complete the repository update."
+            module.fail_json(msg=failed_msg, update_status=msg['update_status'])
+    return msg
+
+
+def main():
+    """Module entry point: parse arguments, pick the Redfish or OMSDK path, run the update."""
+    specs = {
+        "share_name": {"required": True, "type": 'str'},
+        "share_user": {"required": False, "type": 'str'},
+        "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
+        "share_mnt": {"required": False, "type": 'str'},
+
+        "catalog_file_name": {"required": False, "type": 'str', "default": "Catalog.xml"},
+        "reboot": {"required": False, "type": 'bool', "default": False},
+        "job_wait": {"required": False, "type": 'bool', "default": True},
+        "ignore_cert_warning": {"required": False, "type": 'bool', "default": True},
+        "apply_update": {"required": False, "type": 'bool', "default": True},
+    }
+    specs.update(idrac_auth_params)
+    module = AnsibleModule(
+        argument_spec=specs,
+        supports_check_mode=True)
+
+    # Probe the Redfish software-installation service; any failure selects the OMSDK path.
+    redfish_check = False
+    try:
+        with iDRACRedfishAPI(module.params) as obj:
+            resp = obj.invoke_request(IDRAC_PATH, method="GET")
+            software_service_data = resp.json_data
+            redfish_check = True
+    except Exception:
+        # Broad catch is deliberate: the probe must never abort the module.
+        software_service_data = {}
+        redfish_check = False
+
+    try:
+        # Validate the catalog file
+        _validate_catalog_file(module.params['catalog_file_name'])
+        # Check mode only fetches the applicable update list: no apply, no reboot, wait for results.
+        if module.check_mode:
+            module.params['apply_update'] = False
+            module.params['reboot'] = False
+            module.params['job_wait'] = True
+        # Connect to iDRAC and update firmware
+        if redfish_check:
+            with iDRACRedfishAPI(module.params) as redfish_obj:
+                status = update_firmware_redfish(redfish_obj, module, software_service_data)
+        else:
+            with iDRACConnection(module.params) as idrac:
+                status = update_firmware_omsdk(idrac, module)
+    except HTTPError as err:
+        module.fail_json(msg=str(err), update_status=json.load(err))
+    except (RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError,
+            ImportError, ValueError, TypeError, SSLError) as e:
+        module.fail_json(msg=str(e))
+    except Exception as exc:
+        module.fail_json(msg="Unhandled Exception {0}".format(exc))
+
+    module.exit_json(msg=status['update_msg'], update_status=status['update_status'],
+                     changed=status['changed'], failed=status['failed'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py
new file mode 100644
index 000000000..3f644f85e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_firmware_info
+short_description: Get Firmware Inventory
+version_added: "3.0.0"
+description: Get Firmware Inventory.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author: "Rajeev Arakkal (@rajeevarakkal)"
+notes:
+ - Run this module from a system that has direct access to DellEMC iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Get Installed Firmware Inventory
+ dellemc.openmanage.idrac_firmware_info:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+"""
+
+RETURN = r'''
+---
+msg:
+ description: "Fetching the firmware inventory details."
+ returned: always
+ type: str
+ sample: "Successfully fetched the firmware inventory details."
+firmware_info:
+ type: dict
+ description: Details of the firmware.
+ returned: success
+ sample: {
+ "Firmware": [{
+ "BuildNumber": "0",
+ "Classifications": "10",
+ "ComponentID": "102573",
+ "ComponentType": "FRMW",
+ "DeviceID": null,
+ "ElementName": "Power Supply.Slot.1",
+ "FQDD": "PSU.Slot.1",
+ "HashValue": null,
+ "IdentityInfoType": "OrgID:ComponentType:ComponentID",
+ "IdentityInfoValue": "DCIM:firmware:102573",
+ "InstallationDate": "2018-11-22T03:58:23Z",
+ "InstanceID": "DCIM:INSTALLED#0x15__PSU.Slot.1",
+ "IsEntity": "true",
+ "Key": "DCIM:INSTALLED#0x15__PSU.Slot.1",
+ "MajorVersion": "0",
+ "MinorVersion": "3",
+ "RevisionNumber": "67",
+ "RevisionString": null,
+ "Status": "Installed",
+ "SubDeviceID": null,
+ "SubVendorID": null,
+ "Updateable": "true",
+ "VendorID": null,
+ "VersionString": "00.3D.67",
+ "impactsTPMmeasurements": "false"
+ }]
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+
+import json
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+try:
+ from omsdk.sdkfile import LocalFile
+ from omsdk.catalog.sdkupdatemgr import UpdateManager
+ from omdrivers.helpers.iDRAC.UpdateHelper import UpdateHelper
+ HAS_OMSDK = True
+except ImportError:
+ HAS_OMSDK = False
+
+
+# Main
+def main():
+    """Fetch the installed firmware inventory from iDRAC and exit the module."""
+    specs = {}
+    specs.update(idrac_auth_params)
+    module = AnsibleModule(
+        argument_spec=specs,
+        supports_check_mode=True)
+
+    try:
+        with iDRACConnection(module.params) as idrac:
+            # Inventory comes from the OMSDK update manager's InstalledFirmware attribute.
+            msg = idrac.update_mgr.InstalledFirmware
+    except HTTPError as err:
+        module.fail_json(msg=str(err), error_info=json.load(err))
+    except URLError as err:
+        # An unreachable host is reported as 'unreachable', not as a failure.
+        module.exit_json(msg=str(err), unreachable=True)
+    except (RuntimeError, SSLValidationError, IOError, ValueError, TypeError, ConnectionError) as e:
+        module.fail_json(msg=str(e))
+
+    module.exit_json(msg="Successfully fetched the firmware inventory details.",
+                     firmware_info=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py
new file mode 100644
index 000000000..2d555f9a2
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_lifecycle_controller_job_status_info
+short_description: Get the status of a Lifecycle Controller job
+version_added: "2.1.0"
+description: This module shows the status of a specific Lifecycle Controller job using its job ID.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ job_id:
+ required: True
+ type: str
+ description: JOB ID in the format "JID_123456789012".
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author:
+ - "Rajeev Arakkal (@rajeevarakkal)"
+ - "Anooja Vardhineni (@anooja-vardhineni)"
+notes:
+ - Run this module from a system that has direct access to DellEMC iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Show status of a Lifecycle Control job
+ dellemc.openmanage.idrac_lifecycle_controller_job_status_info:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: "JID_1234567890"
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of the job facts operation.
+ returned: always
+ type: str
+ sample: "Successfully fetched the job info."
+job_info:
+ description: Displays the status of a Lifecycle Controller job.
+ returned: success
+ type: dict
+ sample: {
+ "ElapsedTimeSinceCompletion": "8742",
+ "InstanceID": "JID_844222910040",
+ "JobStartTime": "NA",
+ "JobStatus": "Completed",
+ "JobUntilTime": "NA",
+ "Message": "Job completed successfully.",
+ "MessageArguments": "NA",
+ "MessageID": "RED001",
+ "Name": "update:DCIM:INSTALLED#iDRAC.Embedded.1-1#IDRACinfo",
+ "PercentComplete": "100",
+ "Status": "Success"
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+
+import json
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+    """Fetch the status of a Lifecycle Controller job by its job ID and exit the module."""
+    specs = {
+        "job_id": {"required": True, "type": 'str'}
+    }
+    specs.update(idrac_auth_params)
+    module = AnsibleModule(
+        argument_spec=specs,
+        supports_check_mode=True)
+
+    try:
+        with iDRACConnection(module.params) as idrac:
+            # NOTE(review): 'failed' is assigned but never used afterwards.
+            job_id, msg, failed = module.params.get('job_id'), {}, False
+            msg = idrac.job_mgr.get_job_status(job_id)
+            # "Found Fault" is the job manager's status for an unknown/invalid job ID.
+            if msg.get('Status') == "Found Fault":
+                module.fail_json(msg="Job ID is invalid.")
+    except HTTPError as err:
+        module.fail_json(msg=str(err), error_info=json.load(err))
+    except URLError as err:
+        # An unreachable host is reported as 'unreachable', not as a failure.
+        module.exit_json(msg=str(err), unreachable=True)
+    except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
+            ImportError, ValueError, TypeError) as e:
+        module.fail_json(msg=str(e))
+    # NOTE(review): message lacks the trailing period shown in the RETURN sample -- confirm intended text.
+    module.exit_json(msg="Successfully fetched the job info", job_info=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py
new file mode 100644
index 000000000..984f8e3f4
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_lifecycle_controller_jobs
+short_description: Delete the Lifecycle Controller Jobs
+version_added: "2.1.0"
+description:
+ - Delete a Lifecycle Controller job using its job ID or delete all jobs.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ job_id:
+ type: str
+ description:
+ - Job ID of the specific job to be deleted.
+ - All the jobs in the job queue are deleted if this option is not specified.
+
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+ - "Anooja Vardhineni (@anooja-vardhineni)"
+notes:
+ - Run this module from a system that has direct access to DellEMC iDRAC.
+ - This module does not support C(check_mode).
+"""
+EXAMPLES = """
+---
+- name: Delete Lifecycle Controller job queue
+ dellemc.openmanage.idrac_lifecycle_controller_jobs:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Delete Lifecycle Controller job using a job ID
+ dellemc.openmanage.idrac_lifecycle_controller_jobs:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: "JID_801841929470"
+"""
+RETURN = """
+---
+msg:
+ type: str
+ description: Status of the delete operation.
+ returned: always
+ sample: 'Successfully deleted the job.'
+status:
+ type: dict
+ description: Details of the delete operation.
+ returned: success
+ sample: {
+ 'Message': 'The specified job was deleted',
+ 'MessageID': 'SUP020',
+ 'ReturnValue': '0'
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+"""
+
+
+import json
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+
+
+def main():
+    """Delete one Lifecycle Controller job (by ID) or the whole job queue, then exit the module."""
+    specs = {
+        "job_id": {"required": False, "type": 'str'}
+    }
+    specs.update(idrac_auth_params)
+    module = AnsibleModule(
+        argument_spec=specs,
+        supports_check_mode=False)
+    try:
+        with iDRACConnection(module.params) as idrac:
+            job_id, resp = module.params.get('job_id'), {}
+            # With a job_id delete that single job, otherwise flush the entire queue.
+            if job_id is not None:
+                resp = idrac.job_mgr.delete_job(job_id)
+                jobstr = "job"
+            else:
+                resp = idrac.job_mgr.delete_all_jobs()
+                jobstr = "job queue"
+            # NOTE(review): on the delete-all path job_id is None, so this message would
+            # read "...the Job: None."; also a missing "Status" key raises an uncaught KeyError.
+            if resp["Status"] == "Error":
+                msg = "Failed to delete the Job: {0}.".format(job_id)
+                module.fail_json(msg=msg, status=resp)
+    except HTTPError as err:
+        module.fail_json(msg=str(err), error_info=json.load(err))
+    except URLError as err:
+        # An unreachable host is reported as 'unreachable', not as a failure.
+        module.exit_json(msg=str(err), unreachable=True)
+    except (ImportError, ValueError, RuntimeError, TypeError) as e:
+        module.fail_json(msg=str(e))
+    module.exit_json(msg="Successfully deleted the {0}.".format(jobstr), status=resp, changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py
new file mode 100644
index 000000000..74606260c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_lifecycle_controller_logs
+short_description: Export Lifecycle Controller logs to a network share or local path.
+version_added: "2.1.0"
+description:
+ - Export Lifecycle Controller logs to a given network share or local path.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ share_name:
+ description:
+ - Network share or local path.
+ - CIFS, NFS network share types are supported.
+ type: str
+ required: True
+ share_user:
+ type: str
+ description: Network share user in the format 'user@domain' or 'domain\\user' if user is
+ part of a domain else 'user'. This option is mandatory for CIFS Network Share.
+ share_password:
+ type: str
+ description: Network share user password. This option is mandatory for CIFS Network Share.
+ aliases: ['share_pwd']
+ job_wait:
+ description: Whether to wait for the running job completion or not.
+ type: bool
+ default: True
+
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author:
+ - "Rajeev Arakkal (@rajeevarakkal)"
+ - "Anooja Vardhineni (@anooja-vardhineni)"
+notes:
+ - This module requires 'Administrator' privilege for I(idrac_user).
+ - Exporting data to a local share is supported only on iDRAC9-based PowerEdge Servers and later.
+ - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - This module does not support C(check_mode).
+"""
+
+EXAMPLES = r'''
+---
+- name: Export lifecycle controller logs to NFS share.
+ dellemc.openmanage.idrac_lifecycle_controller_logs:
+ idrac_ip: "190.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "192.168.0.0:/nfsfileshare"
+
+- name: Export lifecycle controller logs to CIFS share.
+ dellemc.openmanage.idrac_lifecycle_controller_logs:
+ idrac_ip: "190.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "\\\\192.168.0.2\\share"
+ share_user: "share_user_name"
+ share_password: "share_user_pwd"
+
+- name: Export lifecycle controller logs to LOCAL path.
+ dellemc.openmanage.idrac_lifecycle_controller_logs:
+ idrac_ip: "190.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "/example/export_lc"
+'''
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Status of the export lifecycle controller logs job.
+ returned: always
+ sample: "Successfully exported the lifecycle controller logs."
+lc_logs_status:
+ description: Status of the export operation along with job details and file path.
+ returned: success
+ type: dict
+ sample: {
+ "ElapsedTimeSinceCompletion": "0",
+ "InstanceID": "JID_274774785395",
+ "JobStartTime": "NA",
+ "JobStatus": "Completed",
+ "JobUntilTime": "NA",
+ "Message": "LCL Export was successful",
+ "MessageArguments": "NA",
+ "MessageID": "LC022",
+ "Name": "LC Export",
+ "PercentComplete": "100",
+ "Status": "Success",
+ "file": "192.168.0.0:/nfsfileshare/190.168.0.1_20210728_133437_LC_Log.log",
+ "retval": true
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+"""
+
+
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+import json
+try:
+ from omsdk.sdkfile import file_share_manager
+ from omsdk.sdkcreds import UserCredentials
+except ImportError:
+ pass
+
+
+def get_user_credentials(module):
+    """Build the share object with domain-aware credentials.
+
+    Splits a 'user@domain' or 'domain\\\\user' share_user value into the bare
+    user name and the work group before creating the file-share object.
+    Returns the share object created by file_share_manager.
+    """
+    share_username = module.params['share_user']
+    share_password = module.params['share_password']
+    work_group = None
+    if share_username is not None and "@" in share_username:
+        # 'user@domain' form: user first, domain second.
+        username_domain = share_username.split("@")
+        share_username = username_domain[0]
+        work_group = username_domain[1]
+    elif share_username is not None and "\\" in share_username:
+        # 'domain\\user' form: domain first, user second.
+        username_domain = share_username.split("\\")
+        work_group = username_domain[0]
+        share_username = username_domain[1]
+    share = file_share_manager.create_share_obj(share_path=module.params['share_name'],
+                                                creds=UserCredentials(share_username, share_password,
+                                                                      work_group=work_group), isFolder=True)
+    return share
+
+
+def run_export_lc_logs(idrac, module):
+    """
+    Export Lifecycle Controller Log to the given file share.
+
+    Keyword arguments:
+    idrac  -- iDRAC handle
+    module -- Ansible module
+
+    Returns the export job status dict from the iDRAC log manager.
+    """
+
+    # Placeholders (%ip, %Y%m%d_%H%M%S) are presumably expanded by OMSDK when
+    # the file is created -- TODO confirm against omsdk.sdkfile.
+    lclog_file_name_format = "%ip_%Y%m%d_%H%M%S_LC_Log.log"
+    share_username = module.params.get('share_user')
+    # Domain-qualified users need the user/work-group split done by get_user_credentials().
+    if (share_username is not None) and ("@" in share_username or "\\" in share_username):
+        myshare = get_user_credentials(module)
+    else:
+        myshare = file_share_manager.create_share_obj(share_path=module.params['share_name'],
+                                                      creds=UserCredentials(module.params['share_user'],
+                                                                            module.params['share_password']),
+                                                      isFolder=True)
+    lc_log_file = myshare.new_file(lclog_file_name_format)
+    job_wait = module.params['job_wait']
+    msg = idrac.log_mgr.lclog_export(lc_log_file, job_wait)
+    return msg
+
+
+# Main()
+def main():
+    """Export the Lifecycle Controller logs to a share and exit the module."""
+    specs = {
+        "share_name": {"required": True, "type": 'str'},
+        "share_user": {"required": False, "type": 'str'},
+        "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
+        "job_wait": {"required": False, "type": 'bool', "default": True},
+    }
+    specs.update(idrac_auth_params)
+    module = AnsibleModule(
+        argument_spec=specs,
+        supports_check_mode=False)
+
+    try:
+        with iDRACConnection(module.params) as idrac:
+            msg = run_export_lc_logs(idrac, module)
+            # Drop the (useless) file path from a failed export before reporting.
+            if msg.get("Status") in ["Failed", "Failure"] or msg.get("JobStatus") in ["Failed", "Failure"]:
+                msg.pop("file", None)
+                module.fail_json(msg="Unable to export the lifecycle controller logs.", lc_logs_status=msg)
+            message = "Successfully exported the lifecycle controller logs."
+            if module.params['job_wait'] is False:
+                message = "The export lifecycle controller log job is submitted successfully."
+            module.exit_json(msg=message, lc_logs_status=msg)
+    except HTTPError as err:
+        module.fail_json(msg=str(err), error_info=json.load(err))
+    except URLError as err:
+        # An unreachable host is reported as 'unreachable', not as a failure.
+        module.exit_json(msg=str(err), unreachable=True)
+    except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
+            ImportError, ValueError, TypeError) as e:
+        module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py
new file mode 100644
index 000000000..3d3bddc03
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_lifecycle_controller_status_info
+short_description: Get the status of the Lifecycle Controller
+version_added: "2.1.0"
+description:
+ - This module shows the status of the Lifecycle Controller on a Dell EMC PowerEdge server.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author:
+ - "Rajeev Arakkal (@rajeevarakkal)"
+ - "Anooja Vardhineni (@anooja-vardhineni)"
+notes:
+ - Run this module from a system that has direct access to DellEMC iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Show status of the Lifecycle Controller
+ dellemc.openmanage.idrac_lifecycle_controller_status_info:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of fetching lifecycle controller status.
+ returned: always
+ type: str
+ sample: "Successfully fetched the lifecycle controller status."
+lc_status_info:
+ description: Displays the status of the Lifecycle Controller on a Dell EMC PowerEdge server.
+ returned: success
+ type: dict
+ sample: {
+ "msg": {
+ "LCReady": true,
+ "LCStatus": "Ready"
+ }
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.basic import AnsibleModule
+import json
+
+
+def main():
+ specs = {}
+ specs.update(idrac_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ supports_check_mode=True)
+
+ try:
+ with iDRACConnection(module.params) as idrac:
+ lcready = idrac.config_mgr.LCReady
+ lcstatus = idrac.config_mgr.LCStatus
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
+ ImportError, ValueError, TypeError) as e:
+ module.fail_json(msg=str(e))
+ module.exit_json(msg="Successfully fetched the lifecycle controller status.",
+ lc_status_info={'LCReady': lcready, 'LCStatus': lcstatus})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py
new file mode 100644
index 000000000..8f2930165
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py
@@ -0,0 +1,444 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 6.0.0
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_network
+short_description: Configures the iDRAC network attributes
+version_added: "2.1.0"
+deprecated:
+ removed_at_date: "2024-07-31"
+ why: Replaced with M(dellemc.openmanage.idrac_attributes).
+ alternative: Use M(dellemc.openmanage.idrac_attributes) instead.
+ removed_from_collection: dellemc.openmanage
+description:
+ - This module allows to configure iDRAC network settings.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ share_name:
+ type: str
+ description:
+      - (deprecated) Network share or a local path.
+ - This option is deprecated and will be removed in the later version.
+ share_user:
+ type: str
+ description:
+      - (deprecated) Network share user name. Use the format 'user@domain' or 'domain\\user' if user is part of a domain.
+ This option is mandatory for CIFS share.
+ - This option is deprecated and will be removed in the later version.
+ share_password:
+ type: str
+ description:
+      - (deprecated) Network share user password. This option is mandatory for CIFS share.
+ - This option is deprecated and will be removed in the later version.
+ aliases: ['share_pwd']
+ share_mnt:
+ type: str
+ description:
+      - (deprecated) Local mount path of the network share with read-write permission for the ansible user.
+ This option is mandatory for network shares.
+ - This option is deprecated and will be removed in the later version.
+ setup_idrac_nic_vlan:
+ type: str
+ description: Allows to configure VLAN on iDRAC.
+ choices: [Enabled, Disabled]
+ register_idrac_on_dns:
+ type: str
+ description: Registers iDRAC on a Domain Name System (DNS).
+ choices: [Enabled, Disabled]
+ dns_idrac_name:
+ type: str
+ description: Name of the DNS to register iDRAC.
+ auto_config:
+ type: str
+ description: Allows to enable or disable auto-provisioning to automatically acquire domain name from DHCP.
+ choices: [Enabled, Disabled]
+ static_dns:
+ type: str
+ description: Enter the static DNS domain name.
+ vlan_id:
+ type: int
+ description: Enter the VLAN ID. The VLAN ID must be a number from 1 through 4094.
+ vlan_priority:
+ type: int
+ description: Enter the priority for the VLAN ID. The priority value must be a number from 0 through 7.
+ enable_nic:
+ type: str
+ description: Allows to enable or disable the Network Interface Controller (NIC) used by iDRAC.
+ choices: [Enabled, Disabled]
+ nic_selection:
+ type: str
+ description: Select one of the available NICs.
+ choices: [Dedicated, LOM1, LOM2, LOM3, LOM4]
+ failover_network:
+ type: str
+ description: "Select one of the remaining LOMs. If a network fails, the traffic is routed through the failover
+ network."
+ choices: [ALL, LOM1, LOM2, LOM3, LOM4, T_None]
+ auto_detect:
+ type: str
+ description: Allows to auto detect the available NIC types used by iDRAC.
+ choices: [Enabled, Disabled]
+ auto_negotiation:
+ type: str
+ description: Allows iDRAC to automatically set the duplex mode and network speed.
+ choices: [Enabled, Disabled]
+ network_speed:
+ type: str
+ description: Select the network speed for the selected NIC.
+ choices: [T_10, T_100, T_1000]
+ duplex_mode:
+ type: str
+ description: Select the type of data transmission for the NIC.
+ choices: [Full, Half]
+ nic_mtu:
+ type: int
+ description: Maximum Transmission Unit of the NIC.
+ ip_address:
+ type: str
+ description: Enter a valid iDRAC static IPv4 address.
+ enable_dhcp:
+ type: str
+ description: Allows to enable or disable Dynamic Host Configuration Protocol (DHCP) in iDRAC.
+ choices: [Enabled, Disabled]
+ enable_ipv4:
+ type: str
+ description: Allows to enable or disable IPv4 configuration.
+ choices: [Enabled, Disabled]
+ dns_from_dhcp:
+ type: str
+ description: Allows to enable DHCP to obtain DNS server address.
+ choices: [Enabled, Disabled]
+ static_dns_1:
+ type: str
+ description: Enter the preferred static DNS server IPv4 address.
+ static_dns_2:
+ type: str
+    description: Enter the alternate static DNS server IPv4 address.
+ static_gateway:
+ type: str
+ description: Enter the static IPv4 gateway address to iDRAC.
+ static_net_mask:
+ type: str
+ description: Enter the static IP subnet mask to iDRAC.
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+ - "Anooja Vardhineni (@anooja-vardhineni)"
+notes:
+ - This module requires 'Administrator' privilege for I(idrac_user).
+ - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Configure iDRAC network settings
+ dellemc.openmanage.idrac_network:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ register_idrac_on_dns: Enabled
+ dns_idrac_name: None
+ auto_config: None
+ static_dns: None
+ setup_idrac_nic_vlan: Enabled
+ vlan_id: 0
+ vlan_priority: 1
+ enable_nic: Enabled
+ nic_selection: Dedicated
+ failover_network: T_None
+ auto_detect: Disabled
+ auto_negotiation: Enabled
+ network_speed: T_1000
+ duplex_mode: Full
+ nic_mtu: 1500
+ ip_address: "192.168.0.1"
+ enable_dhcp: Enabled
+ enable_ipv4: Enabled
+ static_dns_1: "192.168.0.1"
+ static_dns_2: "192.168.0.1"
+ dns_from_dhcp: Enabled
+ static_gateway: None
+ static_net_mask: None
+"""
+
+RETURN = r'''
+---
+msg:
+  description: Overall status of the iDRAC network settings configuration operation.
+ returned: always
+ type: str
+ sample: "Successfully configured the idrac network settings."
+network_status:
+ description: Status of the Network settings operation job.
+ returned: success
+ type: dict
+ sample: {
+ "@odata.context": "/redfish/v1/$metadata#DellJob.DellJob",
+ "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_856418531008",
+ "@odata.type": "#DellJob.v1_0_2.DellJob",
+ "CompletionTime": "2020-03-31T03:04:15",
+ "Description": "Job Instance",
+ "EndTime": null,
+ "Id": "JID_856418531008",
+ "JobState": "Completed",
+ "JobType": "ImportConfiguration",
+ "Message": "Successfully imported and applied Server Configuration Profile.",
+ "MessageArgs": [],
+ "MessageArgs@odata.count": 0,
+ "MessageId": "SYS053",
+ "Name": "Import Configuration",
+ "PercentComplete": 100,
+ "StartTime": "TIME_NOW",
+ "Status": "Success",
+ "TargetSettingsURI": null,
+ "retval": true
+}
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import os
+import tempfile
+import json
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from omdrivers.enums.iDRAC.iDRAC import (DNSRegister_NICTypes, DNSDomainFromDHCP_NICStaticTypes,
+ Enable_NICTypes, VLanEnable_NICTypes,
+ Selection_NICTypes, Failover_NICTypes,
+ AutoDetect_NICTypes, Autoneg_NICTypes,
+ Speed_NICTypes, Duplex_NICTypes, DHCPEnable_IPv4Types,
+ DNSFromDHCP_IPv4Types, Enable_IPv4Types,
+ DNSFromDHCP_IPv4StaticTypes)
+ from omsdk.sdkfile import file_share_manager
+ from omsdk.sdkcreds import UserCredentials
+except ImportError:
+ pass
+
+
+def run_idrac_network_config(idrac, module):
+ idrac.use_redfish = True
+ share_path = tempfile.gettempdir() + os.sep
+ upd_share = file_share_manager.create_share_obj(share_path=share_path, isFolder=True)
+ if not upd_share.IsValid:
+ module.fail_json(msg="Unable to access the share. Ensure that the share name, "
+ "share mount, and share credentials provided are correct.")
+ idrac.config_mgr.set_liason_share(upd_share)
+ if module.params['register_idrac_on_dns'] is not None:
+ idrac.config_mgr.configure_dns(
+ register_idrac_on_dns=DNSRegister_NICTypes[module.params['register_idrac_on_dns']]
+ )
+ if module.params['dns_idrac_name'] is not None:
+ idrac.config_mgr.configure_dns(
+ dns_idrac_name=module.params['dns_idrac_name']
+ )
+ if module.params['auto_config'] is not None:
+ idrac.config_mgr.configure_dns(
+ auto_config=DNSDomainFromDHCP_NICStaticTypes[module.params['auto_config']]
+ )
+ if module.params['static_dns'] is not None:
+ idrac.config_mgr.configure_dns(
+ static_dns=module.params['static_dns']
+ )
+
+ if module.params['setup_idrac_nic_vlan'] is not None:
+ idrac.config_mgr.configure_nic_vlan(
+ vlan_enable=VLanEnable_NICTypes[module.params['setup_idrac_nic_vlan']]
+ )
+ if module.params['vlan_id'] is not None:
+ idrac.config_mgr.configure_nic_vlan(
+ vlan_id=module.params['vlan_id']
+ )
+ if module.params['vlan_priority'] is not None:
+ idrac.config_mgr.configure_nic_vlan(
+ vlan_priority=module.params['vlan_priority']
+ )
+
+ if module.params['enable_nic'] is not None:
+ idrac.config_mgr.configure_network_settings(
+ enable_nic=Enable_NICTypes[module.params['enable_nic']]
+ )
+ if module.params['nic_selection'] is not None:
+ idrac.config_mgr.configure_network_settings(
+ nic_selection=Selection_NICTypes[module.params['nic_selection']]
+ )
+ if module.params['failover_network'] is not None:
+ idrac.config_mgr.configure_network_settings(
+ failover_network=Failover_NICTypes[module.params['failover_network']]
+ )
+ if module.params['auto_detect'] is not None:
+ idrac.config_mgr.configure_network_settings(
+ auto_detect=AutoDetect_NICTypes[module.params['auto_detect']]
+ )
+ if module.params['auto_negotiation'] is not None:
+ idrac.config_mgr.configure_network_settings(
+ auto_negotiation=Autoneg_NICTypes[module.params['auto_negotiation']]
+ )
+ if module.params['network_speed'] is not None:
+ idrac.config_mgr.configure_network_settings(
+ network_speed=Speed_NICTypes[module.params['network_speed']]
+ )
+ if module.params['duplex_mode'] is not None:
+ idrac.config_mgr.configure_network_settings(
+ duplex_mode=Duplex_NICTypes[module.params['duplex_mode']]
+ )
+ if module.params['nic_mtu'] is not None:
+ idrac.config_mgr.configure_network_settings(
+ nic_mtu=module.params['nic_mtu']
+ )
+
+ if module.params['enable_dhcp'] is not None:
+ idrac.config_mgr.configure_ipv4(
+ enable_dhcp=DHCPEnable_IPv4Types[module.params["enable_dhcp"]]
+ )
+ if module.params['ip_address'] is not None:
+ idrac.config_mgr.configure_ipv4(
+ ip_address=module.params["ip_address"]
+ )
+ if module.params['enable_ipv4'] is not None:
+ idrac.config_mgr.configure_ipv4(
+ enable_ipv4=Enable_IPv4Types[module.params["enable_ipv4"]]
+ )
+ if module.params['dns_from_dhcp'] is not None:
+ idrac.config_mgr.configure_static_ipv4(
+ dns_from_dhcp=DNSFromDHCP_IPv4StaticTypes[module.params["dns_from_dhcp"]]
+ )
+ if module.params['static_dns_1'] is not None:
+ idrac.config_mgr.configure_static_ipv4(
+ dns_1=module.params["static_dns_1"]
+ )
+ if module.params['static_dns_2'] is not None:
+ idrac.config_mgr.configure_static_ipv4(
+ dns_2=module.params["static_dns_2"]
+ )
+ if module.params['static_gateway'] is not None:
+ idrac.config_mgr.configure_static_ipv4(
+ gateway=module.params["static_gateway"]
+ )
+ if module.params['static_net_mask'] is not None:
+ idrac.config_mgr.configure_static_ipv4(
+ net_mask=module.params["static_net_mask"]
+ )
+
+ if module.check_mode:
+ msg = idrac.config_mgr.is_change_applicable()
+ else:
+ msg = idrac.config_mgr.apply_changes(reboot=False)
+ return msg
+
+
+# Main
+def main():
+ specs = {
+ # Export Destination
+ "share_name": {"required": False, "type": 'str'},
+ "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
+ "share_user": {"required": False, "type": 'str'},
+ "share_mnt": {"required": False, "type": 'str'},
+
+ # setup DNS
+ "register_idrac_on_dns": {"required": False, "choices": ['Enabled', 'Disabled'], "default": None},
+ "dns_idrac_name": {"required": False, "default": None, "type": 'str'},
+ "auto_config": {"required": False, "choices": ['Enabled', 'Disabled'], "default": None, 'type': 'str'},
+ "static_dns": {"required": False, "default": None, "type": "str"},
+
+ # set up idrac vlan
+ "setup_idrac_nic_vlan": {"required": False, "choices": ['Enabled', 'Disabled']},
+ "vlan_id": {"required": False, "type": 'int'},
+ "vlan_priority": {"required": False, "type": 'int'},
+
+ # set up NIC
+ "enable_nic": {"required": False, "choices": ['Enabled', 'Disabled'], "default": None},
+ "nic_selection": {"required": False, "choices": ['Dedicated', 'LOM1', 'LOM2', 'LOM3', 'LOM4'], "default": None},
+ "failover_network": {"required": False, "choices": ['ALL', 'LOM1', 'LOM2', 'LOM3', 'LOM4', 'T_None'],
+ "default": None},
+ "auto_detect": {"required": False, "choices": ['Enabled', 'Disabled'], "default": None},
+ "auto_negotiation": {"required": False, "choices": ['Enabled', 'Disabled'], "default": None},
+ "network_speed": {"required": False, "choices": ['T_10', 'T_100', 'T_1000'], "default": None},
+ "duplex_mode": {"required": False, "choices": ['Full', 'Half'], "default": None},
+ "nic_mtu": {"required": False, 'type': 'int'},
+
+ # setup iDRAC IPV4
+ "ip_address": {"required": False, "default": None, "type": "str"},
+ "enable_dhcp": {"required": False, "choices": ["Enabled", "Disabled"], "default": None},
+ "enable_ipv4": {"required": False, "choices": ["Enabled", "Disabled"], "default": None},
+
+ # setup iDRAC Static IPv4
+ "dns_from_dhcp": {"required": False, "choices": ["Enabled", "Disabled"], "default": None},
+ "static_dns_1": {"required": False, "default": None, "type": "str"},
+ "static_dns_2": {"required": False, "default": None, "type": "str"},
+ "static_gateway": {"required": False, "type": "str"},
+ "static_net_mask": {"required": False, "type": "str"},
+ }
+ specs.update(idrac_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ supports_check_mode=True)
+
+ try:
+ with iDRACConnection(module.params) as idrac:
+ msg = run_idrac_network_config(idrac, module)
+ changed, failed = False, False
+ if msg.get('Status') == "Success":
+ changed = True
+ if msg.get('Message') == "No changes found to commit!":
+ changed = False
+ if "No changes were applied" in msg.get('Message'):
+ changed = False
+ elif msg.get('Status') == "Failed":
+ failed = True
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except AttributeError as err:
+ if "NoneType" in str(err):
+ module.fail_json(msg="Unable to access the share. Ensure that the share name, "
+ "share mount, and share credentials provided are correct.")
+ except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
+ ImportError, ValueError, TypeError) as e:
+ module.fail_json(msg=str(e))
+ module.exit_json(msg="Successfully configured the idrac network settings.",
+ network_status=msg, changed=changed, failed=failed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py
new file mode 100644
index 000000000..797534e39
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: idrac_os_deployment
+short_description: Boot to a network ISO image
+version_added: "2.1.0"
+description: Boot to a network ISO image.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ share_name:
+ required: True
+ description: CIFS or NFS Network share.
+ type: str
+ share_user:
+ description: Network share user in the format 'user@domain' or 'domain\\user' if user is
+ part of a domain else 'user'. This option is mandatory for CIFS Network Share.
+ type: str
+ share_password:
+ description: Network share user password. This option is mandatory for CIFS Network Share.
+ type: str
+ aliases: ['share_pwd']
+ iso_image:
+ required: True
+ description: Network ISO name.
+ type: str
+ expose_duration:
+ description: It is the time taken in minutes for the ISO image file to be exposed as a local CD-ROM device to
+ the host server. When the time expires, the ISO image gets automatically detached.
+ type: int
+ default: 1080
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+ - "Jagadeesh N V (@jagadeeshnv)"
+notes:
+ - Run this module from a system that has direct access to DellEMC iDRAC.
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Boot to Network ISO
+ dellemc.openmanage.idrac_os_deployment:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "192.168.0.0:/nfsfileshare"
+ iso_image: "unattended_os_image.iso"
+ expose_duration: 180
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+  description: Overall status of the boot to network ISO image operation.
+ returned: on error
+ sample: "Failed to boot to network iso"
+boot_status:
+ description: Details of the boot to network ISO image operation.
+ returned: always
+ type: dict
+ sample: {
+ "DeleteOnCompletion": "false",
+ "InstanceID": "DCIM_OSDConcreteJob:1",
+ "JobName": "BootToNetworkISO",
+ "JobStatus": "Success",
+ "Message": "The command was successful.",
+ "MessageID": "OSD1",
+ "Name": "BootToNetworkISO",
+ "Status": "Success",
+ "file": "192.168.0.0:/nfsfileshare/unattended_os_image.iso",
+ "retval": true
+ }
+'''
+
+
+import os
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+try:
+ from omsdk.sdkfile import FileOnShare
+ from omsdk.sdkcreds import UserCredentials
+except ImportError:
+ pass
+
+
+def minutes_to_cim_format(module, dur_minutes):
+ try:
+ if dur_minutes < 0:
+ module.fail_json(msg="Invalid value for ExposeDuration.")
+ MIN_PER_HOUR = 60
+ MIN_PER_DAY = 1440
+ days = dur_minutes // MIN_PER_DAY
+ minutes = dur_minutes % MIN_PER_DAY
+ hours = minutes // MIN_PER_HOUR
+ minutes = minutes % MIN_PER_HOUR
+ if days > 0:
+ hours = 23
+ cim_format = "{:08d}{:02d}{:02d}00.000000:000"
+ cim_time = cim_format.format(days, hours, minutes)
+ except Exception:
+ module.fail_json(msg="Invalid value for ExposeDuration.")
+ return cim_time
+
+
+def run_boot_to_network_iso(idrac, module):
+ """Boot to a network ISO image"""
+ try:
+ share_name = module.params['share_name']
+ if share_name is None:
+ share_name = ''
+ share_obj = FileOnShare(remote="{0}{1}{2}".format(share_name, os.sep, module.params['iso_image']),
+ isFolder=False, creds=UserCredentials(module.params['share_user'],
+ module.params['share_password'])
+ )
+ cim_exp_duration = minutes_to_cim_format(module, module.params['expose_duration'])
+ boot_status = idrac.config_mgr.boot_to_network_iso(share_obj, "", expose_duration=cim_exp_duration)
+ if not boot_status.get("Status", False) == "Success":
+ module.fail_json(msg=boot_status)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ return boot_status
+
+
+def main():
+ specs = {
+ "share_name": {"required": True, "type": 'str'},
+ "share_user": {"required": False, "type": 'str'},
+ "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
+ "iso_image": {"required": True, "type": 'str'},
+ "expose_duration": {"required": False, "type": 'int', "default": 1080}
+ }
+ specs.update(idrac_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ supports_check_mode=False)
+
+ try:
+ with iDRACConnection(module.params) as idrac:
+ boot_status = run_boot_to_network_iso(idrac, module)
+ module.exit_json(changed=True, boot_status=boot_status)
+ except (ImportError, ValueError, RuntimeError) as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py
new file mode 100644
index 000000000..a506e5ce2
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py
@@ -0,0 +1,773 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 6.3.0
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: idrac_redfish_storage_controller
+short_description: Configures the physical disk, virtual disk, and storage controller settings
+version_added: "2.1.0"
+description:
+ - This module allows the users to configure the settings of the physical disk, virtual disk,
+ and storage controller.
+extends_documentation_fragment:
+ - dellemc.openmanage.redfish_auth_options
+options:
+ command:
+ description:
+ - These actions may require a system reset, depending on the capabilities of the controller.
+ - C(ResetConfig) - Deletes all the virtual disks and unassigns all hot spares on physical disks.
+ I(controller_id) is required for this operation.
+ - C(AssignSpare) - Assigns a physical disk as a dedicated or global hot spare for a virtual disk.
+ I(target) is required for this operation.
+ - C(SetControllerKey) - Sets the key on controllers, which is used to encrypt the drives in Local
+ Key Management(LKM). I(controller_id), I(key), and I(key_id) are required for this operation.
+ - C(RemoveControllerKey) - Deletes the encryption key on the controller.
+ I(controller_id) is required for this operation.
+ - C(ReKey) - Resets the key on the controller and it always reports as changes found when check mode is enabled.
+ I(controller_id), I(old_key), I(key_id), and I(key) is required for this operation.
+ - C(UnassignSpare) - To unassign the Global or Dedicated hot spare. I(target) is required for this operation.
+ - C(EnableControllerEncryption) - To enable Local Key Management (LKM) or Secure Enterprise Key Manager (SEKM)
+ on controllers that support encryption of the drives. I(controller_id), I(key), and I(key_id) are required
+ for this operation.
+ - C(BlinkTarget) - Blinks the target virtual drive or physical disk and it always reports as changes found
+ when check mode is enabled. I(target) or I(volume_id) is required for this operation.
+ - C(UnBlinkTarget) - Unblink the target virtual drive or physical disk and it always reports as changes
+ found when check mode is enabled. I(target) or I(volume_id) is required for this operation.
+ - C(ConvertToRAID) - Converts the disk from non-RAID to RAID. I(target) is required for this operation.
+ - C(ConvertToNonRAID) - Converts the disk from RAID to non-RAID. I(target) is required for this operation.
+ - C(ChangePDStateToOnline) - To set the disk status to online. I(target) is required for this operation.
+ - C(ChangePDStateToOffline) - To set the disk status to offline. I(target) is required for this operation.
+ - C(LockVirtualDisk) - To encrypt the virtual disk. I(volume_id) is required for this operation.
+ choices: [ResetConfig, AssignSpare, SetControllerKey, RemoveControllerKey, ReKey, UnassignSpare,
+ EnableControllerEncryption, BlinkTarget, UnBlinkTarget, ConvertToRAID, ConvertToNonRAID,
+ ChangePDStateToOnline, ChangePDStateToOffline, LockVirtualDisk]
+ default: AssignSpare
+ type: str
+ target:
+ description:
+ - Fully Qualified Device Descriptor (FQDD) of the target physical drive.
+ - This is mandatory when I(command) is C(AssignSpare), C(UnassignSpare),
+ C(ChangePDStateToOnline), C(ChangePDStateToOffline), C(ConvertToRAID), or C(ConvertToNonRAID).
+ - If I(volume_id) is not specified or empty, this physical drive will be
+ assigned as a global hot spare when I(command) is C(AssignSpare).
+ - "Notes: Global or Dedicated hot spare can be assigned only once for a physical disk,
+ Re-assign cannot be done when I(command) is C(AssignSpare)."
+ type: list
+ elements: str
+ aliases: [drive_id]
+ volume_id:
+ description:
+ - Fully Qualified Device Descriptor (FQDD) of the volume.
+ - Applicable if I(command) is C(AssignSpare), C(BlinkTarget), C(UnBlinkTarget) or C(LockVirtualDisk).
+ - I(volume_id) or I(target) is required when the I(command) is C(BlinkTarget) or C(UnBlinkTarget),
+ if both are specified I(target) is considered.
+ - To know the number of volumes to which a hot spare can be assigned, refer iDRAC Redfish API documentation.
+ type: list
+ elements: str
+ controller_id:
+ description:
+ - Fully Qualified Device Descriptor (FQDD) of the storage controller. For example-'RAID.Slot.1-1'.
+ - This option is mandatory when I(command) is C(ResetConfig), C(SetControllerKey),
+ C(RemoveControllerKey), C(ReKey), or C(EnableControllerEncryption).
+ type: str
+ key:
+ description:
+ - A new security key passphrase that the encryption-capable controller uses to create the
+ encryption key. The controller uses the encryption key to lock or unlock access to the
+ Self-Encrypting Drive (SED). Only one encryption key can be created for each controller.
+ - This is mandatory when I(command) is C(SetControllerKey), C(ReKey), or C(EnableControllerEncryption)
+ and when I(mode) is C(LKM).
+ - The length of the key can be a maximum of 32 characters in length, where the expanded form of
+ the special character is counted as a single character.
+ - "The key must contain at least one character from each of the character classes: uppercase,
+ lowercase, number, and special character."
+ type: str
+ key_id:
+ description:
+ - This is a user supplied text label associated with the passphrase.
+ - This is mandatory when I(command) is C(SetControllerKey), C(ReKey), or C(EnableControllerEncryption)
+ and when I(mode) is C(LKM).
+ - The length of I(key_id) can be a maximum of 32 characters in length and should not have any spaces.
+ type: str
+ old_key:
+ description:
+ - Security key passphrase used by the encryption-capable controller.
+ - This option is mandatory when I(command) is C(ReKey) and I(mode) is C(LKM).
+ type: str
+ mode:
+ description:
+ - Encryption mode of the encryption capable controller.
+ - This option is applicable only when I(command) is C(ReKey) or C(EnableControllerEncryption).
+ - C(SEKM) requires secure enterprise key manager license on the iDRAC.
+ - C(LKM) to choose mode as local key mode.
+ choices: [LKM, SEKM]
+ default: LKM
+ type: str
+ job_wait:
+ description:
+ - Provides the option if the module has to wait for the job to be completed.
+ type: bool
+ default: False
+ job_wait_timeout:
+ description:
+ - The maximum wait time of job completion in seconds before the job tracking is stopped.
+ - This option is applicable when I(job_wait) is C(True).
+ type: int
+ default: 120
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Jagadeesh N V (@jagadeeshnv)"
+ - "Felix Stephen (@felixs88)"
+ - "Husniya Hameed (@husniya_hameed)"
+notes:
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module always reports as changes found when C(ReKey), C(BlinkTarget), and C(UnBlinkTarget).
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Assign dedicated hot spare
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ volume_id:
+ - "Disk.Virtual.0:RAID.Slot.1-1"
+ target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ tags:
+ - assign_dedicated_hot_spare
+
+- name: Assign global hot spare
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ tags:
+ - assign_global_hot_spare
+
+- name: Unassign hot spare
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ command: UnassignSpare
+ tags:
+ - un-assign-hot-spare
+
+- name: Set controller encryption key
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "SetControllerKey"
+ controller_id: "RAID.Slot.1-1"
+ key: "PassPhrase@123"
+ key_id: "mykeyid123"
+ tags:
+ - set_controller_key
+
+- name: Rekey in LKM mode
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "ReKey"
+ controller_id: "RAID.Slot.1-1"
+ key: "NewPassPhrase@123"
+ key_id: "newkeyid123"
+ old_key: "OldPassPhrase@123"
+ tags:
+ - rekey_lkm
+
+- name: Rekey in SEKM mode
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "ReKey"
+ controller_id: "RAID.Slot.1-1"
+ mode: "SEKM"
+ tags:
+ - rekey_sekm
+
+- name: Remove controller key
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "RemoveControllerKey"
+ controller_id: "RAID.Slot.1-1"
+ tags:
+ - remove_controller_key
+
+- name: Reset controller configuration
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "ResetConfig"
+ controller_id: "RAID.Slot.1-1"
+ tags:
+ - reset_config
+
+- name: Enable controller encryption
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "EnableControllerEncryption"
+ controller_id: "RAID.Slot.1-1"
+ mode: "LKM"
+ key: "your_Key@123"
+ key_id: "your_Keyid@123"
+ tags:
+ - enable-encrypt
+
+- name: Blink physical disk.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: BlinkTarget
+ target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ tags:
+ - blink-target
+
+- name: Blink virtual drive.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: BlinkTarget
+ volume_id: "Disk.Virtual.0:RAID.Slot.1-1"
+ tags:
+ - blink-volume
+
+- name: Unblink physical disk.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: UnBlinkTarget
+ target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ tags:
+ - unblink-target
+
+- name: Unblink virtual drive.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: UnBlinkTarget
+ volume_id: "Disk.Virtual.0:RAID.Slot.1-1"
+ tags:
+ - unblink-drive
+
+- name: Convert physical disk to RAID
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "ConvertToRAID"
+ target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ tags:
+ - convert-raid
+
+- name: Convert physical disk to non-RAID
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "ConvertToNonRAID"
+ target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ tags:
+ - convert-non-raid
+
+- name: Change physical disk state to online.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "ChangePDStateToOnline"
+ target: "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ tags:
+ - pd-state-online
+
+- name: Change physical disk state to offline.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "ChangePDStateToOnline"
+ target: "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ tags:
+ - pd-state-offline
+
+- name: Lock virtual drive
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "LockVirtualDisk"
+ volume_id: "Disk.Virtual.0:RAID.SL.3-1"
+ tags:
+ - lock
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall status of the storage controller configuration operation.
+ returned: always
+ sample: "Successfully submitted the job that performs the AssignSpare operation"
+task:
+ type: dict
+ description: ID and URI resource of the job created.
+ returned: success
+ sample: {
+ "id": "JID_XXXXXXXXXXXXX",
+ "uri": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"
+ }
+status:
+ type: dict
+ description: status of the submitted job.
+ returned: always
+ sample: {
+ "ActualRunningStartTime": "2022-02-09T04:42:41",
+ "ActualRunningStopTime": "2022-02-09T04:44:00",
+ "CompletionTime": "2022-02-09T04:44:00",
+ "Description": "Job Instance",
+ "EndTime": "TIME_NA",
+ "Id": "JID_444033604418",
+ "JobState": "Completed",
+ "JobType": "RealTimeNoRebootConfiguration",
+ "Message": "Job completed successfully.",
+ "MessageArgs":[],
+ "MessageId": "PR19",
+ "Name": "Configure: RAID.Integrated.1-1",
+ "PercentComplete": 100,
+ "StartTime": "2022-02-09T04:42:40",
+ "TargetSettingsURI": null
+ }
+error_info:
+ type: dict
+ description: Details of a http error.
+ returned: on http error
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to run the method because the requested HTTP method is not allowed.",
+ "MessageArgs": [],
+ "MessageArgs@odata.count": 0,
+ "MessageId": "iDRAC.1.6.SYS402",
+ "RelatedProperties": [],
+ "RelatedProperties@odata.count": 0,
+ "Resolution": "Enter a valid HTTP method and retry the operation. For information about
+ valid methods, see the Redfish Users Guide available on the support site.",
+ "Severity": "Informational"
+ }
+ ],
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information"
+ }
+ }
+'''
+
+
+import json
+from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import wait_for_job_completion, strip_substr_dict
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+
+SYSTEM_ID = "System.Embedded.1"
+MANAGER_ID = "iDRAC.Embedded.1"
+RAID_ACTION_URI = "/redfish/v1/Systems/{system_id}/Oem/Dell/DellRaidService/Actions/DellRaidService.{action}"
+CONTROLLER_URI = "/redfish/v1/Dell/Systems/{system_id}/Storage/DellController/{controller_id}"
+VOLUME_URI = "/redfish/v1/Systems/{system_id}/Storage/{controller_id}/Volumes"
+PD_URI = "/redfish/v1/Systems/System.Embedded.1/Storage/{controller_id}/Drives/{drive_id}"
+JOB_URI_OEM = "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/Jobs/{job_id}"
+
+JOB_SUBMISSION = "Successfully submitted the job that performs the '{0}' operation."
+JOB_COMPLETION = "Successfully performed the '{0}' operation."
+CHANGES_FOUND = "Changes found to be applied."
+NO_CHANGES_FOUND = "No changes found to be applied."
+TARGET_ERR_MSG = "The Fully Qualified Device Descriptor (FQDD) of the target {0} must be only one."
+PD_ERROR_MSG = "Unable to locate the physical disk with the ID: {0}"
+ENCRYPT_ERR_MSG = "The storage controller '{0}' does not support encryption."
+PHYSICAL_DISK_ERR = "Volume is not encryption capable."
+
+
+def check_id_exists(module, redfish_obj, key, item_id, uri):
+ msg = "{0} with id '{1}' not found in system".format(key, item_id)
+ try:
+ resp = redfish_obj.invoke_request("GET", uri.format(system_id=SYSTEM_ID, controller_id=item_id))
+ if not resp.success:
+ module.fail_json(msg=msg)
+ except HTTPError as err:
+ module.fail_json(msg=msg, error_info=json.load(err))
+
+
+def ctrl_key(module, redfish_obj):
+ resp, job_uri, job_id, payload = None, None, None, {}
+ controller_id = module.params.get("controller_id")
+ command, mode = module.params["command"], module.params["mode"]
+ key, key_id = module.params.get("key"), module.params.get("key_id")
+ check_id_exists(module, redfish_obj, "controller_id", controller_id, CONTROLLER_URI)
+ ctrl_resp = redfish_obj.invoke_request("GET", CONTROLLER_URI.format(system_id=SYSTEM_ID,
+ controller_id=controller_id))
+ security_status = ctrl_resp.json_data.get("SecurityStatus")
+ if security_status == "EncryptionNotCapable":
+ module.fail_json(msg=ENCRYPT_ERR_MSG.format(controller_id))
+ ctrl_key_id = ctrl_resp.json_data.get("KeyID")
+ if command == "SetControllerKey":
+ if module.check_mode and ctrl_key_id is None:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif (module.check_mode and ctrl_key_id is not None) or (not module.check_mode and ctrl_key_id is not None):
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ payload = {"TargetFQDD": controller_id, "Key": key, "Keyid": key_id}
+ elif command == "ReKey":
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ if mode == "LKM":
+ payload = {"TargetFQDD": controller_id, "Mode": mode, "NewKey": key,
+ "Keyid": key_id, "OldKey": module.params.get("old_key")}
+ else:
+ payload = {"TargetFQDD": controller_id, "Mode": mode}
+ elif command == "RemoveControllerKey":
+ if module.check_mode and ctrl_key_id is not None:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif (module.check_mode and ctrl_key_id is None) or (not module.check_mode and ctrl_key_id is None):
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ payload = {"TargetFQDD": controller_id}
+ elif command == "EnableControllerEncryption":
+ if module.check_mode and not security_status == "SecurityKeyAssigned":
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif (module.check_mode and security_status == "SecurityKeyAssigned") or \
+ (not module.check_mode and security_status == "SecurityKeyAssigned"):
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ payload = {"TargetFQDD": controller_id, "Mode": mode}
+ if mode == "LKM":
+ payload["Key"] = key
+ payload["Keyid"] = key_id
+ resp = redfish_obj.invoke_request("POST", RAID_ACTION_URI.format(system_id=SYSTEM_ID, action=command),
+ data=payload)
+ job_uri = resp.headers.get("Location")
+ job_id = job_uri.split("/")[-1]
+ return resp, job_uri, job_id
+
+
+def ctrl_reset_config(module, redfish_obj):
+ resp, job_uri, job_id = None, None, None
+ controller_id = module.params.get("controller_id")
+ check_id_exists(module, redfish_obj, "controller_id", controller_id, CONTROLLER_URI)
+ member_resp = redfish_obj.invoke_request("GET", VOLUME_URI.format(system_id=SYSTEM_ID, controller_id=controller_id))
+ members = member_resp.json_data.get("Members")
+ if module.check_mode and members:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif (module.check_mode and not members) or (not module.check_mode and not members):
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ else:
+ resp = redfish_obj.invoke_request("POST", RAID_ACTION_URI.format(system_id=SYSTEM_ID,
+ action=module.params["command"]),
+ data={"TargetFQDD": controller_id})
+ job_uri = resp.headers.get("Location")
+ job_id = job_uri.split("/")[-1]
+ return resp, job_uri, job_id
+
+
+def hot_spare_config(module, redfish_obj):
+ target, command = module.params.get("target"), module.params["command"]
+ resp, job_uri, job_id = None, None, None
+ volume = module.params.get("volume_id")
+ controller_id = target[0].split(":")[-1]
+ drive_id = target[0]
+ try:
+ pd_resp = redfish_obj.invoke_request("GET", PD_URI.format(controller_id=controller_id, drive_id=drive_id))
+ except HTTPError:
+ module.fail_json(msg=PD_ERROR_MSG.format(drive_id))
+ else:
+ hot_spare = pd_resp.json_data.get("HotspareType")
+ if module.check_mode and hot_spare == "None" and command == "AssignSpare" or \
+ (module.check_mode and not hot_spare == "None" and command == "UnassignSpare"):
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif (module.check_mode and hot_spare in ["Dedicated", "Global"] and command == "AssignSpare") or \
+ (not module.check_mode and hot_spare in ["Dedicated", "Global"] and command == "AssignSpare") or \
+ (module.check_mode and hot_spare == "None" and command == "UnassignSpare") or \
+ (not module.check_mode and hot_spare == "None" and command == "UnassignSpare"):
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ else:
+ payload = {"TargetFQDD": drive_id}
+ if volume is not None and command == "AssignSpare":
+ payload["VirtualDiskArray"] = volume
+ resp = redfish_obj.invoke_request("POST", RAID_ACTION_URI.format(system_id=SYSTEM_ID,
+ action=command),
+ data=payload)
+ job_uri = resp.headers.get("Location")
+ job_id = job_uri.split("/")[-1]
+ return resp, job_uri, job_id
+
+
+def change_pd_status(module, redfish_obj):
+ resp, job_uri, job_id = None, None, None
+ command, target = module.params["command"], module.params.get("target")
+ controller_id = target[0].split(":")[-1]
+ drive_id = target[0]
+ state = "Online" if command == "ChangePDStateToOnline" else "Offline"
+ try:
+ pd_resp = redfish_obj.invoke_request("GET", PD_URI.format(controller_id=controller_id, drive_id=drive_id))
+ raid_status = pd_resp.json_data["Oem"]["Dell"]["DellPhysicalDisk"]["RaidStatus"]
+ except HTTPError:
+ module.fail_json(msg=PD_ERROR_MSG.format(drive_id))
+ else:
+ if module.check_mode and not state == raid_status:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif (module.check_mode and state == raid_status) or (not module.check_mode and state == raid_status):
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ else:
+ resp = redfish_obj.invoke_request("POST", RAID_ACTION_URI.format(system_id=SYSTEM_ID,
+ action="ChangePDState"),
+ data={"TargetFQDD": drive_id, "State": state})
+ job_uri = resp.headers.get("Location")
+ job_id = job_uri.split("/")[-1]
+ return resp, job_uri, job_id
+
+
+def convert_raid_status(module, redfish_obj):
+ resp, job_uri, job_id = None, None, None
+ command, target = module.params["command"], module.params.get("target")
+ ctrl, pd_ready_state = None, []
+ try:
+ for ctrl in target:
+ controller_id = ctrl.split(":")[-1]
+ pd_resp = redfish_obj.invoke_request("GET", PD_URI.format(controller_id=controller_id, drive_id=ctrl))
+ raid_status = pd_resp.json_data["Oem"]["Dell"]["DellPhysicalDisk"]["RaidStatus"]
+ pd_ready_state.append(raid_status)
+ except HTTPError:
+ module.fail_json(msg=PD_ERROR_MSG.format(ctrl))
+ else:
+ if (command == "ConvertToRAID" and module.check_mode and 0 < pd_ready_state.count("NonRAID")) or \
+ (command == "ConvertToNonRAID" and module.check_mode and 0 < pd_ready_state.count("Ready")):
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif (command == "ConvertToRAID" and module.check_mode and
+ len(pd_ready_state) == pd_ready_state.count("Ready")) or \
+ (command == "ConvertToRAID" and not module.check_mode and
+ len(pd_ready_state) == pd_ready_state.count("Ready")) or \
+ (command == "ConvertToNonRAID" and module.check_mode and
+ len(pd_ready_state) == pd_ready_state.count("NonRAID")) or \
+ (command == "ConvertToNonRAID" and not module.check_mode and
+ len(pd_ready_state) == pd_ready_state.count("NonRAID")):
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ else:
+ resp = redfish_obj.invoke_request("POST", RAID_ACTION_URI.format(system_id=SYSTEM_ID,
+ action=command),
+ data={"PDArray": target})
+ job_uri = resp.headers.get("Location")
+ job_id = job_uri.split("/")[-1]
+ return resp, job_uri, job_id
+
+
+def target_identify_pattern(module, redfish_obj):
+ target, volume = module.params.get("target"), module.params.get("volume_id")
+ command = module.params.get("command")
+ payload = {"TargetFQDD": None}
+
+ if target is not None and volume is None:
+ payload = {"TargetFQDD": target[0]}
+ elif volume is not None and target is None:
+ payload = {"TargetFQDD": volume[0]}
+ elif target is not None and volume is not None:
+ payload = {"TargetFQDD": target[0]}
+
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ resp = redfish_obj.invoke_request("POST", RAID_ACTION_URI.format(system_id=SYSTEM_ID,
+ action=command),
+ data=payload)
+ return resp
+
+
+def lock_virtual_disk(module, redfish_obj):
+    """Encrypt (lock) a virtual disk on an encryption-capable controller.
+
+    Verifies the controller exists, checks that every member drive is a
+    self-encrypting drive, and submits the LockVirtualDisk action unless
+    the volume is already locked.
+
+    :return: tuple of (response, job URI, job id) for the submitted job.
+    """
+    volume, command = module.params.get("volume_id"), module.params["command"]
+    resp, job_uri, job_id = None, None, None
+    # Controller FQDD is the suffix of the volume FQDD (after the last ':').
+    controller_id = volume[0].split(":")[-1]
+    check_id_exists(module, redfish_obj, "controller_id", controller_id, CONTROLLER_URI)
+    volume_uri = VOLUME_URI + "/{volume_id}"
+    try:
+        volume_resp = redfish_obj.invoke_request("GET", volume_uri.format(system_id=SYSTEM_ID,
+                                                                          controller_id=controller_id,
+                                                                          volume_id=volume[0]))
+        links = volume_resp.json_data.get("Links")
+        if links:
+            # Every member drive must be a SED or the volume cannot be locked.
+            for disk in volume_resp.json_data.get("Links").get("Drives"):
+                drive_link = disk["@odata.id"]
+                drive_resp = redfish_obj.invoke_request("GET", drive_link)
+                encryption_ability = drive_resp.json_data.get("EncryptionAbility")
+                if encryption_ability != "SelfEncryptingDrive":
+                    module.fail_json(msg=PHYSICAL_DISK_ERR)
+        lock_status = volume_resp.json_data.get("Oem").get("Dell").get("DellVolume").get("LockStatus")
+    except HTTPError:
+        module.fail_json(msg=PD_ERROR_MSG.format(controller_id))
+    else:
+        if lock_status == "Unlocked" and module.check_mode:
+            module.exit_json(msg=CHANGES_FOUND, changed=True)
+        elif lock_status == "Locked":
+            module.exit_json(msg=NO_CHANGES_FOUND)
+        else:
+            resp = redfish_obj.invoke_request("POST", RAID_ACTION_URI.format(system_id=SYSTEM_ID,
+                                                                             action="LockVirtualDisk"),
+                                              data={"TargetFQDD": volume[0]})
+            job_uri = resp.headers.get("Location")
+            job_id = job_uri.split("/")[-1]
+    return resp, job_uri, job_id
+
+
+def validate_inputs(module):
+ module_params = module.params
+ command = module_params.get("command")
+ mode = module_params.get("mode")
+ if command == "ReKey" and mode == "LKM":
+ key = module_params.get("key")
+ key_id = module_params.get("key_id")
+ old_key = module_params.get("old_key")
+ if not all([key, key_id, old_key]):
+ module.fail_json(msg="All of the following: key, key_id and old_key are "
+ "required for '{0}' operation.".format(command))
+ elif command == "EnableControllerEncryption" and mode == "LKM":
+ key = module_params.get("key")
+ key_id = module_params.get("key_id")
+ if not all([key, key_id]):
+ module.fail_json(msg="All of the following: key, key_id are "
+ "required for '{0}' operation.".format(command))
+ elif command in ["AssignSpare", "UnassignSpare", "BlinkTarget", "UnBlinkTarget", "LockVirtualDisk"]:
+ target, volume = module_params.get("target"), module_params.get("volume_id")
+ if target is not None and not 1 >= len(target):
+ module.fail_json(msg=TARGET_ERR_MSG.format("physical disk"))
+ if volume is not None and not 1 >= len(volume):
+ module.fail_json(msg=TARGET_ERR_MSG.format("virtual drive"))
+ elif command in ["ChangePDStateToOnline", "ChangePDStateToOffline"]:
+ target = module.params.get("target")
+ if target is not None and not 1 >= len(target):
+ module.fail_json(msg=TARGET_ERR_MSG.format("physical disk"))
+
+
+def main():
+    """Entry point: build the argument spec, dispatch the requested command,
+    and report the submitted or completed job status."""
+    specs = {
+        "command": {"required": False, "default": "AssignSpare",
+                    "choices": ["ResetConfig", "AssignSpare", "SetControllerKey", "RemoveControllerKey",
+                                "ReKey", "UnassignSpare", "EnableControllerEncryption", "BlinkTarget",
+                                "UnBlinkTarget", "ConvertToRAID", "ConvertToNonRAID", "ChangePDStateToOnline",
+                                "ChangePDStateToOffline", "LockVirtualDisk"]},
+        "controller_id": {"required": False, "type": "str"},
+        "volume_id": {"required": False, "type": "list", "elements": "str"},
+        "target": {"required": False, "type": "list", "elements": "str", "aliases": ["drive_id"]},
+        "key": {"required": False, "type": "str", "no_log": True},
+        "key_id": {"required": False, "type": "str"},
+        "old_key": {"required": False, "type": "str", "no_log": True},
+        "mode": {"required": False, "choices": ["LKM", "SEKM"], "default": "LKM"},
+        "job_wait": {"required": False, "type": "bool", "default": False},
+        "job_wait_timeout": {"required": False, "type": "int", "default": 120}
+    }
+    specs.update(redfish_auth_params)
+    module = AnsibleModule(
+        argument_spec=specs,
+        # Per-command mandatory parameters; the last element True means
+        # "at least one of" instead of "all of".
+        required_if=[
+            ["command", "SetControllerKey", ["controller_id", "key", "key_id"]],
+            ["command", "ReKey", ["controller_id", "mode"]], ["command", "ResetConfig", ["controller_id"]],
+            ["command", "RemoveControllerKey", ["controller_id"]], ["command", "AssignSpare", ["target"]],
+            ["command", "UnassignSpare", ["target"]], ["command", "EnableControllerEncryption", ["controller_id"]],
+            ["command", "BlinkTarget", ["target", "volume_id"], True],
+            ["command", "UnBlinkTarget", ["target", "volume_id"], True], ["command", "ConvertToRAID", ["target"]],
+            ["command", "ConvertToNonRAID", ["target"]], ["command", "ChangePDStateToOnline", ["target"]],
+            ["command", "ChangePDStateToOffline", ["target"]],
+            ["command", "LockVirtualDisk", ["volume_id"]]
+        ],
+        supports_check_mode=True)
+    validate_inputs(module)
+    try:
+        command = module.params["command"]
+        # req_session=True keeps one authenticated Redfish session for all calls.
+        with Redfish(module.params, req_session=True) as redfish_obj:
+            if command == "ResetConfig":
+                resp, job_uri, job_id = ctrl_reset_config(module, redfish_obj)
+            elif command == "SetControllerKey" or command == "ReKey" or \
+                    command == "RemoveControllerKey" or command == "EnableControllerEncryption":
+                resp, job_uri, job_id = ctrl_key(module, redfish_obj)
+            elif command == "AssignSpare" or command == "UnassignSpare":
+                resp, job_uri, job_id = hot_spare_config(module, redfish_obj)
+            elif command == "BlinkTarget" or command == "UnBlinkTarget":
+                # Blink/UnBlink complete synchronously; exit immediately on success.
+                resp = target_identify_pattern(module, redfish_obj)
+                if resp.success and resp.status_code == 200:
+                    module.exit_json(msg=JOB_COMPLETION.format(command), changed=True)
+            elif command == "ConvertToRAID" or command == "ConvertToNonRAID":
+                resp, job_uri, job_id = convert_raid_status(module, redfish_obj)
+            elif command == "ChangePDStateToOnline" or command == "ChangePDStateToOffline":
+                resp, job_uri, job_id = change_pd_status(module, redfish_obj)
+            elif command == "LockVirtualDisk":
+                resp, job_uri, job_id = lock_virtual_disk(module, redfish_obj)
+            oem_job_url = JOB_URI_OEM.format(job_id=job_id)
+            job_wait = module.params["job_wait"]
+            if job_wait:
+                # Track the job until it finishes (or times out) and report its state.
+                resp, msg = wait_for_job_completion(redfish_obj, oem_job_url, job_wait=job_wait,
+                                                    wait_timeout=module.params["job_wait_timeout"])
+                job_data = strip_substr_dict(resp.json_data)
+                if job_data["JobState"] == "Failed":
+                    changed, failed = False, True
+                else:
+                    changed, failed = True, False
+                module.exit_json(msg=JOB_COMPLETION.format(command), task={"id": job_id, "uri": oem_job_url},
+                                 status=job_data, changed=changed, failed=failed)
+            else:
+                # Fetch the job's current state once without waiting for completion.
+                resp, msg = wait_for_job_completion(redfish_obj, oem_job_url, job_wait=job_wait,
+                                                    wait_timeout=module.params["job_wait_timeout"])
+                job_data = strip_substr_dict(resp.json_data)
+                module.exit_json(msg=JOB_SUBMISSION.format(command), task={"id": job_id, "uri": oem_job_url},
+                                 status=job_data)
+    except HTTPError as err:
+        module.fail_json(msg=str(err), error_info=json.load(err))
+    except URLError as err:
+        # Network unreachable is reported as unreachable, not failed.
+        module.exit_json(msg=str(err), unreachable=True)
+    except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
+            ImportError, ValueError, TypeError, AttributeError) as e:
+        module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py
new file mode 100644
index 000000000..8de5ffc9f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py
@@ -0,0 +1,132 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_reset
+short_description: Reset iDRAC
+version_added: "2.1.0"
+description:
+ - This module resets iDRAC.
+ - "iDRAC is not accessible for some time after running this module. It is recommended to wait for some time,
+ before trying to connect to iDRAC."
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+ - "Anooja Vardhineni (@anooja-vardhineni)"
+notes:
+ - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Reset iDRAC
+ dellemc.openmanage.idrac_reset:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ idrac_port: 443
+ ca_path: "/path/to/ca_cert.pem"
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Status of the iDRAC reset operation.
+ returned: always
+ type: str
+ sample: "Successfully performed iDRAC reset."
+reset_status:
+ description: Details of iDRAC reset operation.
+ returned: always
+ type: dict
+ sample: {
+ "idracreset": {
+ "Data": {
+ "StatusCode": 204
+ },
+ "Message": "none",
+ "Status": "Success",
+ "StatusCode": 204,
+ "retval": true
+ }
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+
+import json
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+
def run_idrac_reset(idrac, module):
    """Perform (or, in check mode, simulate) an iDRAC reset.

    :param idrac: active iDRAC connection object (omsdk); untouched in check mode.
    :param module: AnsibleModule instance; only ``check_mode`` is read.
    :return: dict describing the reset (or would-be reset) status.
    """
    if module.check_mode:
        # Check mode never touches the device; report what would happen.
        return {'Status': 'Success', 'Message': 'Changes found to commit!', 'changes_applicable': True}
    # Route the reset through the Redfish transport of the SDK.
    idrac.use_redfish = True
    return idrac.config_mgr.reset_idrac()
+
+
def main():
    """Module entry point: build the argument spec, run the reset, report."""
    module = AnsibleModule(
        argument_spec=dict(idrac_auth_params),
        supports_check_mode=True)

    try:
        # The context manager opens and tears down the iDRAC session.
        with iDRACConnection(module.params) as idrac:
            msg = run_idrac_reset(idrac, module)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Network-unreachable is reported as unreachable, not failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
            ImportError, ValueError, TypeError) as e:
        module.fail_json(msg=str(e))
    module.exit_json(msg="Successfully performed iDRAC reset.", reset_status=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py
new file mode 100644
index 000000000..67a02c12e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py
@@ -0,0 +1,666 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 6.0.0
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: idrac_server_config_profile
+short_description: Export or Import iDRAC Server Configuration Profile (SCP)
+version_added: "2.1.0"
+description:
+ - Export the Server Configuration Profile (SCP) from the iDRAC or import from a
+ network share (CIFS, NFS, HTTP, HTTPS) or a local file.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ command:
+ description:
+ - If C(import), the module performs SCP import operation.
+ - If C(export), the module performs SCP export operation.
+ - If C(preview), the module performs SCP preview operation.
+ type: str
+ choices: ['import', 'export', 'preview']
+ default: 'export'
+ job_wait:
+ description: Whether to wait for job completion or not.
+ type: bool
+ required: True
+ share_name:
+ description:
+ - Network share or local path.
+ - CIFS, NFS, HTTP, and HTTPS network share types are supported.
+ type: str
+ required: True
+ share_user:
+ description: Network share user in the format 'user@domain' or 'domain\\user' if user is
+ part of a domain else 'user'. This option is mandatory for CIFS Network Share.
+ type: str
+ share_password:
+ description: Network share user password. This option is mandatory for CIFS Network Share.
+ type: str
+ aliases: ['share_pwd']
+ scp_file:
+ description:
+ - Name of the server configuration profile (SCP) file.
+ - This option is mandatory if I(command) is C(import).
+ - The default format <idrac_ip>_YYmmdd_HHMMSS_scp is used if this option is not specified for C(import).
+ - I(export_format) is used if the valid extension file is not provided for C(import).
+ type: str
+ scp_components:
+ description:
+ - If C(ALL), this module exports or imports all components configurations from SCP file.
+ - If C(IDRAC), this module exports or imports iDRAC configuration from SCP file.
+ - If C(BIOS), this module exports or imports BIOS configuration from SCP file.
+ - If C(NIC), this module exports or imports NIC configuration from SCP file.
+ - If C(RAID), this module exports or imports RAID configuration from SCP file.
+ type: str
+ choices: ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID']
+ default: 'ALL'
+ shutdown_type:
+ description:
+ - This option is applicable for C(import) command.
+ - If C(Graceful), the job gracefully shuts down the operating system and turns off the server.
+ - If C(Forced), it forcefully shuts down the server.
+ - If C(NoReboot), the job that applies the SCP will pause until you manually reboot the server.
+ type: str
+ choices: ['Graceful', 'Forced', 'NoReboot']
+ default: 'Graceful'
+ end_host_power_state:
+ description:
+ - This option is applicable for C(import) command.
+ - If C(On), End host power state is on.
+ - If C(Off), End host power state is off.
+ type: str
+ choices: ['On' ,'Off']
+ default: 'On'
+ export_format:
+ description: Specify the output file format. This option is applicable for C(export) command.
+ type: str
+ choices: ['JSON', 'XML']
+ default: 'XML'
+ export_use:
+ description: Specify the type of server configuration profile (SCP) to be exported.
+ This option is applicable for C(export) command.
+ type: str
+ choices: ['Default', 'Clone', 'Replace']
+ default: 'Default'
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Jagadeesh N V(@jagadeeshnv)"
+ - "Felix Stephen (@felixs88)"
+notes:
+ - This module requires 'Administrator' privilege for I(idrac_user).
+ - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - This module supports C(check_mode).
+ - To import Server Configuration Profile (SCP) on the iDRAC7 and iDRAC8-based servers,
+ the servers must have iDRAC Enterprise license or later.
+'''
+
+EXAMPLES = r'''
+---
+- name: Export SCP with IDRAC components in JSON format to a local path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "/scp_folder"
+ scp_components: IDRAC
+ scp_file: example_file
+ export_format: JSON
+ export_use: Clone
+ job_wait: True
+
+- name: Import SCP with IDRAC components in JSON format from a local path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "/scp_folder"
+ command: import
+ scp_components: "IDRAC"
+ scp_file: example_file.json
+ shutdown_type: Graceful
+ end_host_power_state: "On"
+ job_wait: False
+
+- name: Export SCP with BIOS components in XML format to a NFS share path with auto-generated file name
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "192.168.0.2:/share"
+ scp_components: "BIOS"
+ export_format: XML
+ export_use: Default
+ job_wait: True
+
+- name: Import SCP with BIOS components in XML format from a NFS share path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "192.168.0.2:/share"
+ command: import
+ scp_components: "BIOS"
+ scp_file: 192.168.0.1_20210618_162856.xml
+ shutdown_type: NoReboot
+ end_host_power_state: "Off"
+ job_wait: False
+
+- name: Export SCP with RAID components in XML format to a CIFS share path with share user domain name
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "\\\\192.168.0.2\\share"
+ share_user: share_username@domain
+ share_password: share_password
+ share_mnt: /mnt/cifs
+ scp_file: example_file.xml
+ scp_components: "RAID"
+ export_format: XML
+ export_use: Default
+ job_wait: True
+
+- name: Import SCP with RAID components in XML format from a CIFS share path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "\\\\192.168.0.2\\share"
+ share_user: share_username
+ share_password: share_password
+ share_mnt: /mnt/cifs
+ command: import
+ scp_components: "RAID"
+ scp_file: example_file.xml
+ shutdown_type: Forced
+ end_host_power_state: "On"
+ job_wait: True
+
+- name: Export SCP with ALL components in JSON format to a HTTP share path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "http://192.168.0.3/share"
+ share_user: share_username
+ share_password: share_password
+ scp_file: example_file.json
+ scp_components: ALL
+ export_format: JSON
+ job_wait: False
+
+- name: Import SCP with ALL components in JSON format from a HTTP share path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: import
+ share_name: "http://192.168.0.3/share"
+ share_user: share_username
+ share_password: share_password
+ scp_file: example_file.json
+ shutdown_type: Graceful
+ end_host_power_state: "On"
+ job_wait: True
+
+- name: Export SCP with ALL components in XML format to a HTTPS share path without SCP file name
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "https://192.168.0.4/share"
+ share_user: share_username
+ share_password: share_password
+ scp_components: ALL
+ export_format: XML
+ export_use: Replace
+ job_wait: True
+
+- name: Import SCP with ALL components in XML format from a HTTPS share path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: import
+ share_name: "https://192.168.0.4/share"
+ share_user: share_username
+ share_password: share_password
+ scp_file: 192.168.0.1_20160618_164647.xml
+ shutdown_type: Graceful
+ end_host_power_state: "On"
+ job_wait: False
+
+- name: Preview SCP with ALL components in XML format from a CIFS share path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "\\\\192.168.0.2\\share"
+ share_user: share_username
+ share_password: share_password
+ command: preview
+ scp_components: "ALL"
+ scp_file: example_file.xml
+ job_wait: True
+
+- name: Preview SCP with ALL components in JSON format from a NFS share path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "192.168.0.2:/share"
+ command: preview
+ scp_components: "IDRAC"
+ scp_file: example_file.xml
+ job_wait: True
+
+- name: Preview SCP with ALL components in XML format from a HTTP share path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "http://192.168.0.1/http-share"
+ share_user: share_username
+ share_password: share_password
+ command: preview
+ scp_components: "ALL"
+ scp_file: example_file.xml
+ job_wait: True
+
+- name: Preview SCP with ALL components in XML format from a local path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "/scp_folder"
+ command: preview
+ scp_components: "IDRAC"
+ scp_file: example_file.json
+ job_wait: False
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Status of the import or export SCP job.
+ returned: always
+ sample: "Successfully imported the Server Configuration Profile"
+scp_status:
+ type: dict
+ description: SCP operation job and progress details from the iDRAC.
+ returned: success
+ sample:
+ {
+ "Id": "JID_XXXXXXXXX",
+ "JobState": "Completed",
+ "JobType": "ImportConfiguration",
+ "Message": "Successfully imported and applied Server Configuration Profile.",
+ "MessageArgs": [],
+ "MessageId": "XXX123",
+ "Name": "Import Configuration",
+ "PercentComplete": 100,
+ "StartTime": "TIME_NOW",
+ "Status": "Success",
+ "TargetSettingsURI": null,
+ "retval": true
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import os
+import json
+import re
+import copy
+from datetime import datetime
+from os.path import exists
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+
# Base Redfish URI of the embedded iDRAC manager resource.
REDFISH_SCP_BASE_URI = "/redfish/v1/Managers/iDRAC.Embedded.1"
# Messages reported to the user from check-mode runs.
CHANGES_FOUND = "Changes found to be applied."
NO_CHANGES_FOUND = "No changes found to be applied."
# Failure message used when a local SCP file path does not exist.
INVALID_FILE = "Invalid file path provided."
# Template for the Dell OEM job resource; formatted with a job id.
JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/Jobs/{job_id}"
+
+
def get_scp_file_format(module):
    """Resolve the SCP file name for the current operation.

    If ``scp_file`` is supplied it is used as-is; when it lacks a recognised
    extension, the lower-cased ``export_format`` is appended. Otherwise a
    name of the form ``<idrac_ip>_YYYYmmdd_HHMMSS_scp.<ext>`` is generated.

    :param module: AnsibleModule; reads ``scp_file``, ``export_format``, ``idrac_ip``.
    :return: resolved file name as a string.
    """
    scp_file = module.params['scp_file']
    if scp_file:
        if str(scp_file.lower()).endswith(('.xml', '.json')):
            return scp_file
        return "{0}.{1}".format(scp_file, module.params['export_format'].lower())
    # BUG FIX: the previous formatting interpolated raw (unpadded) ints, so
    # single-digit months/days/hours produced ambiguous names such as
    # "..._202135_..." instead of the documented YYYYmmdd_HHMMSS layout.
    return "{0}_{1}_scp.{2}".format(
        module.params["idrac_ip"],
        datetime.now().strftime("%Y%m%d_%H%M%S"),
        module.params['export_format'].lower())
+
+
def response_format_change(response, params, file_name):
    """Normalise an SCP API response into the module's documented dict shape.

    When ``job_wait`` is set the completed job's JSON body is trimmed and
    flattened; otherwise a synthetic job-submission summary is built from the
    response's Location header.
    """
    sep = "/" if "/" in params["share_name"] else "\\"
    file_path = "{0}{1}{2}".format(params["share_name"], sep, file_name)
    if params["job_wait"]:
        data = response.json_data
        # Drop transient/no-value fields from the finished job payload.
        for key in ("Description", "Name", "EndTime", "StartTime", "TaskState", "Messages"):
            data.pop(key, None)
        if data.get("Oem") is not None:
            # Flatten the Dell OEM payload into the top-level dict.
            data.update(data["Oem"]["Dell"])
            data.pop("Oem", None)
        data["file"] = file_path
        data["retval"] = True
        return data
    # Job submitted but not awaited: synthesise a summary from the headers.
    job_id = response.headers.get("Location").split("/")[-1]
    job_uri = JOB_URI.format(job_id=job_id)
    return {
        "Data": {"StatusCode": response.status_code, "jobid": job_id, "next_uri": job_uri},
        "Job": {"JobId": job_id, "ResourceURI": job_uri},
        "Return": "JobCreated",
        "Status": "Success",
        "Message": "none",
        "StatusCode": response.status_code,
        "file": file_path,
        "retval": True,
    }
+
+
def run_export_import_scp_http(idrac, module):
    """Run SCP import or export against an HTTP/HTTPS share.

    Builds the share descriptor from the parsed share URL, dispatches to the
    SDK's import/export call, and normalises the response. Fails the module
    if the resulting task status is Critical.
    """
    params = module.params
    parsed = urlparse(params["share_name"])
    scp_file = params.get("scp_file")
    share = {
        "share_ip": parsed.netloc,
        "share_name": parsed.path.strip('/'),
        "share_type": parsed.scheme.upper(),
        "file_name": scp_file,
        "username": params.get("share_user"),
        "password": params.get("share_password"),
    }
    command = params["command"]
    scp_file_name_format = scp_file
    if command == "import":
        scp_response = idrac.import_scp_share(shutdown_type=params["shutdown_type"],
                                              host_powerstate=params["end_host_power_state"],
                                              job_wait=params["job_wait"],
                                              target=params["scp_components"], share=share, )
    elif command == "export":
        # Export may need a generated file name when none was supplied.
        scp_file_name_format = get_scp_file_format(module)
        share["file_name"] = scp_file_name_format
        scp_response = idrac.export_scp(export_format=params["export_format"],
                                        export_use=params["export_use"],
                                        target=params["scp_components"],
                                        job_wait=params["job_wait"], share=share, )
    scp_response = response_format_change(scp_response, params, scp_file_name_format)
    if isinstance(scp_response, dict) and scp_response.get("TaskStatus") == "Critical":
        module.fail_json(msg="Failed to {0} scp.".format(command), scp_status=scp_response)
    return scp_response
+
+
def get_scp_share_details(module):
    """Derive the share descriptor (NFS, CIFS, or local) from ``share_name``.

    :return: tuple of (share dict, resolved SCP file name).
    """
    share_name = module.params.get("share_name")
    scp_file_name_format = get_scp_file_format(module)
    if ":" in share_name:
        # NFS shares are written as "<ip>:<path>".
        host, _, path = share_name.partition(":")
        share = {"share_ip": host, "share_name": path, "share_type": "NFS"}
    elif "\\" in share_name:
        # CIFS shares embed an IPv4 address, e.g. "\\\\192.168.0.2\\share".
        ip_pattern = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
        share_path = re.split(ip_pattern, share_name)
        share_ip = re.findall(ip_pattern, share_name)
        share_path_name = "\\".join(list(filter(None, share_path[-1].split("\\"))))
        share = {"share_ip": share_ip[0], "share_name": share_path_name, "share_type": "CIFS",
                 "username": module.params.get("share_user"),
                 "password": module.params.get("share_password")}
    else:
        share = {"share_type": "LOCAL", "share_name": share_name}
    # Only export needs a target file name in the share descriptor.
    if module.params["command"] == "export":
        share["file_name"] = scp_file_name_format
    return share, scp_file_name_format
+
+
def export_scp_redfish(module, idrac):
    """Export the SCP, writing to a local directory or a network share.

    For a local target the job is polled here so the payload can be written
    to disk by this host; for network shares the iDRAC writes the file.
    """
    command = module.params["command"]
    share, scp_file_name_format = get_scp_share_details(module)
    local_share = share["share_type"] == "LOCAL"
    scp_response = idrac.export_scp(export_format=module.params["export_format"],
                                    export_use=module.params["export_use"],
                                    target=module.params["scp_components"],
                                    job_wait=False if local_share else module.params["job_wait"],
                                    share=share, )
    if local_share:
        scp_response = wait_for_response(scp_response, module, share, idrac)
    scp_response = response_format_change(scp_response, module.params, scp_file_name_format)
    if isinstance(scp_response, dict) and scp_response.get("TaskStatus") == "Critical":
        module.fail_json(msg="Failed to {0} scp.".format(command), scp_status=scp_response)
    return scp_response
+
+
def wait_for_response(scp_resp, module, share, idrac):
    """Poll a local-share SCP export job to completion and write its payload to disk.

    :param scp_resp: initial export response; its ``Location`` header holds the task URI.
    :param module: AnsibleModule; reads ``export_format`` and ``job_wait``.
    :param share: share dict with local ``share_name`` directory and ``file_name``.
    :param idrac: iDRAC Redfish connection used to poll the job.
    :return: the final job response when ``job_wait`` is set, otherwise the original response.
    """
    task_uri = scp_resp.headers["Location"]
    job_id = task_uri.split("/")[-1]
    job_uri = JOB_URI.format(job_id=job_id)
    # Always block here: the payload must be complete before it is written locally.
    wait_resp = idrac.wait_for_job_complete(task_uri, job_wait=True)
    with open("{0}/{1}".format(share["share_name"], share["file_name"]), "w") as file_obj:
        if module.params["export_format"] == "JSON":
            json.dump(wait_resp.json_data, file_obj, indent=4)
        else:
            # NOTE(review): assumes the non-JSON (XML) payload object supports
            # decode("utf-8") directly — confirm against wait_for_job_complete's
            # return type in module_utils.
            wait_resp_value = wait_resp.decode("utf-8")
            file_obj.write(wait_resp_value)
    if module.params["job_wait"]:
        # Re-fetch the job resource so the caller receives the final job state.
        scp_resp = idrac.invoke_request(job_uri, "GET")
    return scp_resp
+
+
def preview_scp_redfish(module, idrac, http_share, import_job_wait=False):
    """Run an SCP preview (also used by import check mode) and normalise the result.

    :param http_share: True when ``share_name`` is an HTTP/HTTPS URL.
    :param import_job_wait: wait flag used instead of ``job_wait`` when the
        active command is ``import`` (check-mode preview always waits).
    """
    command = module.params["command"]
    scp_target = module.params["scp_components"]
    wait_option = import_job_wait if command == "import" else module.params["job_wait"]
    if http_share:
        parsed = urlparse(module.params["share_name"])
        share = {"share_ip": parsed.netloc, "share_name": parsed.path.strip('/'),
                 "share_type": parsed.scheme.upper(), "file_name": module.params.get("scp_file"),
                 "username": module.params.get("share_user"),
                 "password": module.params.get("share_password")}
    else:
        share, _ = get_scp_share_details(module)
        share["file_name"] = module.params.get("scp_file")
    buffer_text = None
    if share["share_type"] == "LOCAL":
        # Local files are read into a buffer and previewed against all components.
        scp_target = "ALL"
        file_path = "{0}{1}{2}".format(share["share_name"], os.sep, share["file_name"])
        if not exists(file_path):
            module.fail_json(msg=INVALID_FILE)
        with open(file_path, "r") as file_obj:
            buffer_text = file_obj.read()
    scp_response = idrac.import_preview(import_buffer=buffer_text, target=scp_target,
                                        share=share, job_wait=wait_option)
    scp_response = response_format_change(scp_response, module.params, share["file_name"])
    if isinstance(scp_response, dict) and scp_response.get("TaskStatus") == "Critical":
        module.fail_json(msg="Failed to {0} scp.".format(command), scp_status=scp_response)
    return scp_response
+
+
def import_scp_redfish(module, idrac, http_share):
    """Import an SCP from an HTTP(S)/NFS/CIFS share or a local file.

    In check mode a preview is run instead and the module exits from within
    this function. Note: ``module.params["job_wait"]`` is temporarily forced
    to True for the preview and restored before the real import.

    :param module: AnsibleModule with the module parameters.
    :param idrac: iDRAC Redfish connection.
    :param http_share: True when ``share_name`` is an HTTP/HTTPS URL.
    :return: normalised import response dict.
    """
    command = module.params["command"]
    scp_target = module.params["scp_components"]
    # Save the caller's wait choice; the check-mode preview below overrides it.
    job_wait = copy.copy(module.params["job_wait"])
    if module.check_mode:
        module.params["job_wait"] = True
        scp_resp = preview_scp_redfish(module, idrac, http_share, import_job_wait=True)
        # NOTE(review): SYS081/SYS082 appear to be the preview message IDs that
        # indicate pending changes — confirm against the iDRAC event catalogue.
        if "SYS081" in scp_resp["MessageId"] or "SYS082" in scp_resp["MessageId"]:
            module.exit_json(msg=CHANGES_FOUND, changed=True)
        else:
            module.fail_json(msg=scp_resp)
    if http_share:
        share_url = urlparse(module.params["share_name"])
        share = {"share_ip": share_url.netloc, "share_name": share_url.path.strip('/'),
                 "share_type": share_url.scheme.upper(), "file_name": module.params.get("scp_file"),
                 "username": module.params.get("share_user"), "password": module.params.get("share_password")}
    else:
        share, scp_file_name_format = get_scp_share_details(module)
        share["file_name"] = module.params.get("scp_file")
    buffer_text = None
    share_dict = share
    if share["share_type"] == "LOCAL":
        # Local imports send the file contents as a buffer with no share info.
        scp_target = "ALL"
        file_path = "{0}{1}{2}".format(share["share_name"], os.sep, share["file_name"])
        if not exists(file_path):
            module.fail_json(msg=INVALID_FILE)
        with open(file_path, "r") as file_obj:
            buffer_text = file_obj.read()
        share_dict = {}
    # Restore the caller's original wait choice before the real import.
    module.params["job_wait"] = job_wait
    scp_response = idrac.import_scp_share(shutdown_type=module.params["shutdown_type"],
                                          host_powerstate=module.params["end_host_power_state"],
                                          job_wait=module.params["job_wait"],
                                          target=scp_target,
                                          import_buffer=buffer_text, share=share_dict, )
    scp_response = response_format_change(scp_response, module.params, share["file_name"])
    if isinstance(scp_response, dict) and scp_response.get("TaskStatus") == "Critical":
        module.fail_json(msg="Failed to {0} scp.".format(command), scp_status=scp_response)
    return scp_response
+
+
def main():
    """Module entry point: dispatch to export, import, or preview of the SCP."""
    specs = {
        "command": {"required": False, "type": 'str',
                    "choices": ['export', 'import', 'preview'], "default": 'export'},
        "job_wait": {"required": True, "type": 'bool'},
        "share_name": {"required": True, "type": 'str'},
        "share_user": {"required": False, "type": 'str'},
        "share_password": {"required": False, "type": 'str',
                           "aliases": ['share_pwd'], "no_log": True},
        "scp_components": {"required": False,
                           "choices": ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID'],
                           "default": 'ALL'},
        "scp_file": {"required": False, "type": 'str'},
        "shutdown_type": {"required": False,
                          "choices": ['Graceful', 'Forced', 'NoReboot'],
                          "default": 'Graceful'},
        "end_host_power_state": {"required": False,
                                 "choices": ['On', 'Off'],
                                 "default": 'On'},
        "export_format": {"required": False, "type": 'str',
                          "choices": ['JSON', 'XML'], "default": 'XML'},
        "export_use": {"required": False, "type": 'str',
                       "choices": ['Default', 'Clone', 'Replace'], "default": 'Default'}
    }
    specs.update(idrac_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        # import and preview both need a concrete SCP file name.
        required_if=[
            ["command", "import", ["scp_file"]],
            ["command", "preview", ["scp_file"]],
        ],
        supports_check_mode=True)

    try:
        changed = False
        http_share = module.params["share_name"].lower().startswith(('http://', 'https://'))
        with iDRACRedfishAPI(module.params) as idrac:
            command = module.params['command']
            if command == 'import':
                if http_share:
                    scp_status = run_export_import_scp_http(idrac, module)
                    # NOTE(review): SYS069 appears to mean "no changes applied"
                    # and SYS053 "applied" — confirm against the iDRAC message
                    # registry before relying on these IDs elsewhere.
                    if "SYS069" in scp_status.get("MessageId", ""):
                        changed = False
                    elif "SYS053" in scp_status.get("MessageId", ""):
                        changed = True
                else:
                    scp_status = import_scp_redfish(module, idrac, http_share)
                    if "No changes were applied" not in scp_status.get('Message', ""):
                        changed = True
                    elif "SYS043" in scp_status.get("MessageId", ""):
                        changed = True
                    elif "SYS069" in scp_status.get("MessageId", ""):
                        changed = False
            elif command == "export":
                if http_share:
                    scp_status = run_export_import_scp_http(idrac, module)
                else:
                    scp_status = export_scp_redfish(module, idrac)
            else:
                # command == "preview"
                scp_status = preview_scp_redfish(module, idrac, http_share, import_job_wait=False)
            if module.params.get('job_wait'):
                # Job finished: trim odata noise before reporting.
                scp_status = strip_substr_dict(scp_status)
                msg = "Successfully {0}ed the Server Configuration Profile."
                module.exit_json(changed=changed, msg=msg.format(command), scp_status=scp_status)
            else:
                msg = "Successfully triggered the job to {0} the Server Configuration Profile."
                module.exit_json(msg=msg.format(command), scp_status=scp_status)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Network-unreachable is reported as unreachable, not failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (ImportError, ValueError, RuntimeError, SSLValidationError,
            ConnectionError, KeyError, TypeError, IndexError) as e:
        module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_syslog.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_syslog.py
new file mode 100644
index 000000000..d078b0851
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_syslog.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_syslog
+short_description: Enable or disable the syslog on iDRAC
+version_added: "2.1.0"
+description:
+ - This module allows to enable or disable the iDRAC syslog.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+ - dellemc.openmanage.network_share_options
+options:
+ syslog:
+ description: Enables or disables an iDRAC syslog.
+ choices: [Enabled, Disabled]
+ type: str
+ default: Enabled
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+ - "Anooja Vardhineni (@anooja-vardhineni)"
+notes:
+ - This module requires 'Administrator' privilege for I(idrac_user).
+ - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Enable iDRAC syslog
+ dellemc.openmanage.idrac_syslog:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "192.168.0.2:/share"
+ share_password: "share_user_pwd"
+ share_user: "share_user_name"
+ share_mnt: "/mnt/share"
+ syslog: "Enabled"
+
+- name: Disable iDRAC syslog
+ dellemc.openmanage.idrac_syslog:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "192.168.0.2:/share"
+ share_password: "share_user_pwd"
+ share_user: "share_user_name"
+ share_mnt: "/mnt/share"
+ syslog: "Disabled"
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of the syslog operation.
+ returned: always
+ type: str
+ sample: "Successfully fetch the syslogs."
+syslog_status:
+ description: Job details of the syslog operation.
+ returned: success
+ type: dict
+ sample: {
+ "@odata.context": "/redfish/v1/$metadata#DellJob.DellJob",
+ "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_852940632485",
+ "@odata.type": "#DellJob.v1_0_2.DellJob",
+ "CompletionTime": "2020-03-27T02:27:45",
+ "Description": "Job Instance",
+ "EndTime": null,
+ "Id": "JID_852940632485",
+ "JobState": "Completed",
+ "JobType": "ImportConfiguration",
+ "Message": "Successfully imported and applied Server Configuration Profile.",
+ "MessageArgs": [],
+ "MessageArgs@odata.count": 0,
+ "MessageId": "SYS053",
+ "Name": "Import Configuration",
+ "PercentComplete": 100,
+ "StartTime": "TIME_NOW",
+ "Status": "Success",
+ "TargetSettingsURI": null,
+ "retval": true
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+
+import json
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+try:
+ from omsdk.sdkfile import file_share_manager
+ from omsdk.sdkcreds import UserCredentials
+except ImportError:
+ pass
+
+
def run_setup_idrac_syslog(idrac, module):
    """Enable or disable the iDRAC syslog, or report applicability in check mode.

    :param idrac: active iDRAC connection (omsdk).
    :param module: AnsibleModule; reads the share_* and ``syslog`` parameters.
    :return: status dict from the configuration manager.
    """
    idrac.use_redfish = True
    # The config workflow stages changes through a network share; it must be
    # validated and attached ("liason share", omsdk's spelling) before any
    # config_mgr operation.
    upd_share = file_share_manager.create_share_obj(share_path=module.params['share_name'],
                                                    mount_point=module.params['share_mnt'],
                                                    isFolder=True,
                                                    creds=UserCredentials(
                                                        module.params['share_user'],
                                                        module.params['share_password']))
    if not upd_share.IsValid:
        module.fail_json(msg="Unable to access the share. Ensure that the share name, "
                             "share mount, and share credentials provided are correct.")
    idrac.config_mgr.set_liason_share(upd_share)
    if module.check_mode:
        # Stage the change without applying it, then ask whether it would apply.
        if module.params['syslog'] == 'Enabled':
            idrac.config_mgr.enable_syslog(apply_changes=False)
        elif module.params['syslog'] == 'Disabled':
            idrac.config_mgr.disable_syslog(apply_changes=False)
        msg = idrac.config_mgr.is_change_applicable()
    else:
        if module.params['syslog'] == 'Enabled':
            msg = idrac.config_mgr.enable_syslog()
        elif module.params['syslog'] == 'Disabled':
            msg = idrac.config_mgr.disable_syslog()
    return msg
+
+
def main():
    """Module entry point: configure the iDRAC syslog and report the result."""
    specs = {
        "share_name": {"required": True, "type": 'str'},
        "share_user": {"required": False, "type": 'str'},
        "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
        "share_mnt": {"required": False, "type": 'str'},
        "syslog": {"required": False, "choices": ['Enabled', 'Disabled'], "default": 'Enabled'}
    }
    specs.update(idrac_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        supports_check_mode=True)

    try:
        with iDRACConnection(module.params) as idrac:
            msg = run_setup_idrac_syslog(idrac, module)
            changed = False
            if msg.get('Status') == "Success":
                changed = True
                if msg.get('Message') == "No changes found to commit!":
                    changed = False
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Network-unreachable is reported as unreachable, not failed.
        module.exit_json(msg=str(err), unreachable=True)
    except AttributeError as err:
        if "NoneType" in str(err):
            module.fail_json(msg="Unable to access the share. Ensure that the share name, "
                                 "share mount, and share credentials provided are correct.")
        # BUG FIX: previously any other AttributeError fell through to the
        # final exit_json with 'msg'/'changed' unbound, raising a NameError
        # instead of a clean failure.
        module.fail_json(msg=str(err))
    except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
            ImportError, ValueError, TypeError) as e:
        module.fail_json(msg=str(e))
    module.exit_json(msg="Successfully fetch the syslogs.",
                     syslog_status=msg, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_system_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_system_info.py
new file mode 100644
index 000000000..61827f2df
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_system_info.py
@@ -0,0 +1,120 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_system_info
+short_description: Get the PowerEdge Server System Inventory
+version_added: "3.0.0"
+description:
+ - Get the PowerEdge Server System Inventory.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author: "Rajeev Arakkal (@rajeevarakkal)"
+notes:
+ - Run this module from a system that has direct access to DellEMC iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Get System Inventory
+ dellemc.openmanage.idrac_system_info:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+"""
+
+RETURN = r'''
+---
+msg:
+ description: "Overall system inventory information status."
+ returned: always
+ type: str
+ sample: "Successfully fetched the system inventory details."
+system_info:
+ type: dict
+ description: Details of the PowerEdge Server System Inventory.
+ returned: success
+ sample: {
+ "BIOS": [
+ {
+ "BIOSReleaseDate": "11/26/2019",
+ "FQDD": "BIOS.Setup.1-1",
+ "InstanceID": "DCIM:INSTALLED#741__BIOS.Setup.1-1",
+ "Key": "DCIM:INSTALLED#741__BIOS.Setup.1-1",
+ "SMBIOSPresent": "True",
+ "VersionString": "2.4.8"
+ }
+ ]
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+
+# Main
+def main():
+ specs = {}
+ specs.update(idrac_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ supports_check_mode=True)
+ try:
+ with iDRACConnection(module.params) as idrac:
+ idrac.get_entityjson()
+ msg = idrac.get_json_device()
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (RuntimeError, SSLValidationError, IOError, ValueError, TypeError, ConnectionError) as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(msg="Successfully fetched the system inventory details.",
+ system_info=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py
new file mode 100644
index 000000000..6227571c0
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 6.0.0
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_timezone_ntp
+short_description: Configures time zone and NTP on iDRAC
+version_added: "2.1.0"
+deprecated:
+ removed_at_date: "2024-07-31"
+ why: Replaced with M(dellemc.openmanage.idrac_attributes).
+ alternative: Use M(dellemc.openmanage.idrac_attributes) instead.
+ removed_from_collection: dellemc.openmanage
+description:
+ - This module allows to configure time zone and NTP on iDRAC.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ setup_idrac_timezone:
+ type: str
+ description: Allows to configure time zone on iDRAC.
+ enable_ntp:
+ type: str
+ description: Allows to enable or disable NTP on iDRAC.
+ choices: [Enabled, Disabled]
+ ntp_server_1:
+ type: str
+ description: The IP address of the NTP server 1.
+ ntp_server_2:
+ type: str
+ description: The IP address of the NTP server 2.
+ ntp_server_3:
+ type: str
+ description: The IP address of the NTP server 3.
+ share_name:
+ type: str
+ description:
+ - (deprecated) Network share or a local path.
+ - This option is deprecated and will be removed in the later version.
+ share_user:
+ type: str
+ description:
+ - (deprecated) Network share user name. Use the format 'user@domain' or 'domain\\user' if user is part of a domain.
+ This option is mandatory for CIFS share.
+ - This option is deprecated and will be removed in the later version.
+ share_password:
+ type: str
+ description:
+ - (deprecated) Network share user password. This option is mandatory for CIFS share.
+ - This option is deprecated and will be removed in the later version.
+ aliases: ['share_pwd']
+ share_mnt:
+ type: str
+ description:
+ - (deprecated) Local mount path of the network share with read-write permission for ansible user.
+ This option is mandatory for network shares.
+ - This option is deprecated and will be removed in the later version.
+
+requirements:
+ - "omsdk >= 1.2.488"
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+ - "Anooja Vardhineni (@anooja-vardhineni)"
+notes:
+ - This module requires 'Administrator' privilege for I(idrac_user).
+ - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Configure time zone and NTP on iDRAC
+ dellemc.openmanage.idrac_timezone_ntp:
+ idrac_ip: "190.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ setup_idrac_timezone: "UTC"
+ enable_ntp: Enabled
+ ntp_server_1: "190.168.0.1"
+ ntp_server_2: "190.168.0.2"
+ ntp_server_3: "190.168.0.3"
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of the timezone and ntp configuration.
+ returned: always
+ type: str
+ sample: "Successfully configured the iDRAC time settings."
+timezone_ntp_status:
+ description: Job details of the time zone setting operation.
+ returned: success
+ type: dict
+ sample: {
+ "@odata.context": "/redfish/v1/$metadata#DellJob.DellJob",
+ "@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_861801613971",
+ "@odata.type": "#DellJob.v1_0_0.DellJob",
+ "CompletionTime": "2020-04-06T19:06:01",
+ "Description": "Job Instance",
+ "EndTime": null,
+ "Id": "JID_861801613971",
+ "JobState": "Completed",
+ "JobType": "ImportConfiguration",
+ "Message": "Successfully imported and applied Server Configuration Profile.",
+ "MessageArgs": [],
+ "MessageId": "SYS053",
+ "Name": "Import Configuration",
+ "PercentComplete": 100,
+ "StartTime": "TIME_NOW",
+ "Status": "Success",
+ "TargetSettingsURI": null,
+ "retval": true
+}
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import os
+import tempfile
+from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+import json
+try:
+ from omdrivers.enums.iDRAC.iDRAC import NTPEnable_NTPConfigGroupTypes
+ from omsdk.sdkfile import file_share_manager
+ from omsdk.sdkcreds import UserCredentials
+except ImportError:
+ pass
+
+
+def run_idrac_timezone_config(idrac, module):
+ """
+ Configure the iDRAC time zone and NTP settings.
+
+ Keyword arguments:
+ idrac -- iDRAC handle
+ module -- Ansible module
+ """
+ idrac.use_redfish = True
+ share_path = tempfile.gettempdir() + os.sep
+ upd_share = file_share_manager.create_share_obj(share_path=share_path, isFolder=True)
+ if not upd_share.IsValid:
+ module.fail_json(msg="Unable to access the share. Ensure that the share name, "
+ "share mount, and share credentials provided are correct.")
+ idrac.config_mgr.set_liason_share(upd_share)
+
+ if module.params['setup_idrac_timezone'] is not None:
+ idrac.config_mgr.configure_timezone(module.params['setup_idrac_timezone'])
+
+ if module.params['enable_ntp'] is not None:
+ idrac.config_mgr.configure_ntp(
+ enable_ntp=NTPEnable_NTPConfigGroupTypes[module.params['enable_ntp']]
+ )
+ if module.params['ntp_server_1'] is not None:
+ idrac.config_mgr.configure_ntp(
+ ntp_server_1=module.params['ntp_server_1']
+ )
+ if module.params['ntp_server_2'] is not None:
+ idrac.config_mgr.configure_ntp(
+ ntp_server_2=module.params['ntp_server_2']
+ )
+ if module.params['ntp_server_3'] is not None:
+ idrac.config_mgr.configure_ntp(
+ ntp_server_3=module.params['ntp_server_3']
+ )
+
+ if module.check_mode:
+ msg = idrac.config_mgr.is_change_applicable()
+ else:
+ msg = idrac.config_mgr.apply_changes(reboot=False)
+ return msg
+
+
+# Main
+def main():
+ specs = {
+ # Export Destination
+ "share_name": {"required": False, "type": 'str'},
+ "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
+ "share_user": {"required": False, "type": 'str'},
+ "share_mnt": {"required": False, "type": 'str'},
+
+ # setup NTP
+ "enable_ntp": {"required": False, "choices": ['Enabled', 'Disabled']},
+ "ntp_server_1": {"required": False},
+ "ntp_server_2": {"required": False},
+ "ntp_server_3": {"required": False},
+
+ # set up timezone
+ "setup_idrac_timezone": {"required": False, "type": 'str'},
+
+ }
+ specs.update(idrac_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ supports_check_mode=True)
+
+ try:
+ with iDRACConnection(module.params) as idrac:
+ changed = False
+ msg = run_idrac_timezone_config(idrac, module)
+ if "Status" in msg:
+ if msg['Status'] == "Success":
+ changed = True
+ if "Message" in msg:
+ if msg['Message'] == "No changes found to commit!":
+ changed = False
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except AttributeError as err:
+ if "NoneType" in str(err):
+ module.fail_json(msg="Unable to access the share. Ensure that the share name, "
+ "share mount, and share credentials provided are correct.")
+ except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
+ ImportError, ValueError, TypeError) as e:
+ module.fail_json(msg=str(e))
+ module.exit_json(msg="Successfully configured the iDRAC time settings.",
+ timezone_ntp_status=msg, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user.py
new file mode 100644
index 000000000..df9f9adbe
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user.py
@@ -0,0 +1,429 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_user
+short_description: Configure settings for user accounts
+version_added: "2.1.0"
+description:
+ - This module allows to perform the following,
+ - Add a new user account.
+ - Edit a user account.
+ - Enable or Disable a user account.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ state:
+ type: str
+ description:
+ - Select C(present) to create or modify a user account.
+ - Select C(absent) to remove a user account.
+ - Ensure Lifecycle Controller is available because the user operation
+ uses the capabilities of Lifecycle Controller.
+ choices: [present, absent]
+ default: present
+ user_name:
+ type: str
+ required: True
+ description: Provide the I(user_name) of the account to be created, deleted or modified.
+ user_password:
+ type: str
+ description:
+ - Provide the password for the user account. The password can be changed when the user account is modified.
+ - To ensure security, the I(user_password) must be at least eight characters long and must contain
+ lowercase and upper-case characters, numbers, and special characters.
+ new_user_name:
+ type: str
+ description: Provide the I(user_name) for the account to be modified.
+ privilege:
+ type: str
+ description:
+ - Following are the role-based privileges.
+ - A user with C(Administrator) privilege can log in to iDRAC, and then configure iDRAC, configure users,
+ clear logs, control and configure system, access virtual console, access virtual media, test alerts,
+ and execute debug commands.
+ - A user with C(Operator) privilege can log in to iDRAC, and then configure iDRAC, control and configure system,
+ access virtual console, access virtual media, and execute debug commands.
+ - A user with C(ReadOnly) privilege can only log in to iDRAC.
+ - A user with C(None), no privileges assigned.
+ choices: [Administrator, ReadOnly, Operator, None]
+ ipmi_lan_privilege:
+ type: str
+ description: The Intelligent Platform Management Interface LAN privilege level assigned to the user.
+ choices: [Administrator, Operator, User, No Access]
+ ipmi_serial_privilege:
+ type: str
+ description:
+ - The Intelligent Platform Management Interface Serial Port privilege level assigned to the user.
+ - This option is only applicable for rack and tower servers.
+ choices: [Administrator, Operator, User, No Access]
+ enable:
+ type: bool
+ description: Provide the option to enable or disable a user from logging in to iDRAC.
+ sol_enable:
+ type: bool
+ description: Enables Serial Over Lan (SOL) for an iDRAC user.
+ protocol_enable:
+ type: bool
+ description: Enables protocol for the iDRAC user.
+ authentication_protocol:
+ type: str
+ description:
+ - This option allows to configure one of the following authentication protocol
+ types to authenticate the iDRAC user.
+ - Secure Hash Algorithm C(SHA).
+ - Message Digest 5 C(MD5).
+ - An authentication protocol is not configured if C(None) is selected.
+ choices: [None, SHA, MD5]
+ privacy_protocol:
+ type: str
+ description:
+ - This option allows to configure one of the following privacy encryption protocols for the iDRAC user.
+ - Data Encryption Standard C(DES).
+ - Advanced Encryption Standard C(AES).
+ - A privacy protocol is not configured if C(None) is selected.
+ choices: [None, DES, AES]
+requirements:
+ - "python >= 3.8.6"
+author: "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to DellEMC iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Configure a new iDRAC user
+ dellemc.openmanage.idrac_user:
+ idrac_ip: 198.162.0.1
+ idrac_user: idrac_user
+ idrac_password: idrac_password
+ ca_path: "/path/to/ca_cert.pem"
+ state: present
+ user_name: user_name
+ user_password: user_password
+ privilege: Administrator
+ ipmi_lan_privilege: Administrator
+ ipmi_serial_privilege: Administrator
+ enable: true
+ sol_enable: true
+ protocol_enable: true
+ authentication_protocol: SHA
+ privacy_protocol: AES
+
+- name: Modify existing iDRAC user username and password
+ dellemc.openmanage.idrac_user:
+ idrac_ip: 198.162.0.1
+ idrac_user: idrac_user
+ idrac_password: idrac_password
+ ca_path: "/path/to/ca_cert.pem"
+ state: present
+ user_name: user_name
+ new_user_name: new_user_name
+ user_password: user_password
+
+- name: Delete existing iDRAC user account
+ dellemc.openmanage.idrac_user:
+ idrac_ip: 198.162.0.1
+ idrac_user: idrac_user
+ idrac_password: idrac_password
+ ca_path: "/path/to/ca_cert.pem"
+ state: absent
+ user_name: user_name
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Status of the iDRAC user configuration.
+ returned: always
+ type: str
+ sample: "Successfully created user account details."
+status:
+ description: Configures the iDRAC users attributes.
+ returned: success
+ type: dict
+ sample: {
+ "@Message.ExtendedInfo": [{
+ "Message": "Successfully Completed Request",
+ "MessageArgs": [],
+ "MessageArgs@odata.count": 0,
+ "MessageId": "Base.1.5.Success",
+ "RelatedProperties": [],
+ "RelatedProperties@odata.count": 0,
+ "Resolution": "None",
+ "Severity": "OK"
+ }, {
+ "Message": "The operation successfully completed.",
+ "MessageArgs": [],
+ "MessageArgs@odata.count": 0,
+ "MessageId": "IDRAC.2.1.SYS413",
+ "RelatedProperties": [],
+ "RelatedProperties@odata.count": 0,
+ "Resolution": "No response action is required.",
+ "Severity": "Informational"}
+ ]}
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+
+import json
+import re
+import time
+from ssl import SSLError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+
+
+ACCOUNT_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/"
+ATTRIBUTE_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Attributes/"
+PRIVILEGE = {"Administrator": 511, "Operator": 499, "ReadOnly": 1, "None": 0}
+ACCESS = {0: "Disabled", 1: "Enabled"}
+
+
+def compare_payload(json_payload, idrac_attr):
+ """
+ :param json_payload: json payload created for update operation
+ :param idrac_attr: idrac user attributes
+ case1: always skip password for difference
+ case2: as idrac_attr returns privilege in the format of string so
+ convert payload to string only for comparison
+ :return: bool
+ """
+ copy_json = json_payload.copy()
+ for key, val in dict(copy_json).items():
+ split_key = key.split("#")[1]
+ if split_key == "Password":
+ is_change_required = True
+ break
+ if split_key == "Privilege":
+ copy_json[key] = str(val)
+ else:
+ is_change_required = bool(list(set(copy_json.items()) - set(idrac_attr.items())))
+ return is_change_required
+
+
+def get_user_account(module, idrac):
+ """
+ This function gets the slot id and slot uri for create and modify.
+ :param module: ansible module arguments
+ :param idrac: idrac objects
+ :return: user_attr, slot_uri, slot_id, empty_slot, empty_slot_uri
+ """
+ slot_uri, slot_id, empty_slot, empty_slot_uri = None, None, None, None
+ if not module.params["user_name"]:
+ module.fail_json(msg="User name is not valid.")
+ response = idrac.export_scp(export_format="JSON", export_use="Default", target="IDRAC", job_wait=True)
+ user_attributes = idrac.get_idrac_local_account_attr(response.json_data, fqdd="iDRAC.Embedded.1")
+ slot_num = tuple(range(2, 17))
+ for num in slot_num:
+ user_name = "Users.{0}#UserName".format(num)
+ if user_attributes.get(user_name) == module.params["user_name"]:
+ slot_id = num
+ slot_uri = ACCOUNT_URI + str(num)
+ break
+ if not user_attributes.get(user_name) and (empty_slot_uri and empty_slot) is None:
+ empty_slot = num
+ empty_slot_uri = ACCOUNT_URI + str(num)
+ return user_attributes, slot_uri, slot_id, empty_slot, empty_slot_uri
+
+
+def get_payload(module, slot_id, action=None):
+ """
+ This function creates the payload with slot id.
+ :param module: ansible module arguments
+ :param action: new user name is only applicable in case of update user name.
+ :param slot_id: slot id for user slot
+ :return: json data with slot id
+ """
+ slot_payload = {"Users.{0}.UserName": module.params["user_name"],
+ "Users.{0}.Password": module.params["user_password"],
+ "Users.{0}.Enable": ACCESS.get(module.params["enable"]),
+ "Users.{0}.Privilege": PRIVILEGE.get(module.params["privilege"]),
+ "Users.{0}.IpmiLanPrivilege": module.params["ipmi_lan_privilege"],
+ "Users.{0}.IpmiSerialPrivilege": module.params["ipmi_serial_privilege"],
+ "Users.{0}.SolEnable": ACCESS.get(module.params["sol_enable"]),
+ "Users.{0}.ProtocolEnable": ACCESS.get(module.params["protocol_enable"]),
+ "Users.{0}.AuthenticationProtocol": module.params["authentication_protocol"],
+ "Users.{0}.PrivacyProtocol": module.params["privacy_protocol"], }
+ if module.params["new_user_name"] is not None and action == "update":
+ user_name = "Users.{0}.UserName".format(slot_id)
+ slot_payload[user_name] = module.params["new_user_name"]
+ elif module.params["state"] == "absent":
+ slot_payload = {"Users.{0}.UserName": "", "Users.{0}.Enable": "Disabled", "Users.{0}.Privilege": 0,
+ "Users.{0}.IpmiLanPrivilege": "No Access", "Users.{0}.IpmiSerialPrivilege": "No Access",
+ "Users.{0}.SolEnable": "Disabled", "Users.{0}.ProtocolEnable": "Disabled",
+ "Users.{0}.AuthenticationProtocol": "SHA", "Users.{0}.PrivacyProtocol": "AES"}
+ payload = dict([(k.format(slot_id), v) for k, v in slot_payload.items() if v is not None])
+ return payload
+
+
+def convert_payload_xml(payload):
+ """
+ this function converts payload to xml and json data.
+ :param payload: user input for payload
+ :return: returns xml and json data
+ """
+ root = """<SystemConfiguration><Component FQDD="iDRAC.Embedded.1">{0}</Component></SystemConfiguration>"""
+ attr = ""
+ json_payload = {}
+ for k, v in payload.items():
+ key = re.sub(r"(?<=\d)\.", "#", k)
+ attr += '<Attribute Name="{0}">{1}</Attribute>'.format(key, v)
+ json_payload[key] = v
+ root = root.format(attr)
+ return root, json_payload
+
+
+def create_or_modify_account(module, idrac, slot_uri, slot_id, empty_slot_id, empty_slot_uri, user_attr):
+ """
+ This function creates a user account if it does not exist, else updates it.
+ :param module: user account module arguments
+ :param idrac: idrac object
+ :param slot_uri: slot uri for update
+ :param slot_id: slot id for update
+ :param empty_slot_id: empty slot id for create
+ :param empty_slot_uri: empty slot uri for create
+ :return: json
+ """
+ generation, firmware_version = idrac.get_server_generation
+ msg, response = "Unable to retrieve the user details.", {}
+ if (slot_id and slot_uri) is None and (empty_slot_id and empty_slot_uri) is not None:
+ msg = "Successfully created user account."
+ payload = get_payload(module, empty_slot_id, action="create")
+ if module.check_mode:
+ module.exit_json(msg="Changes found to commit!", changed=True)
+ if generation >= 14:
+ response = idrac.invoke_request(ATTRIBUTE_URI, "PATCH", data={"Attributes": payload})
+ elif generation < 14:
+ xml_payload, json_payload = convert_payload_xml(payload)
+ time.sleep(10)
+ response = idrac.import_scp(import_buffer=xml_payload, target="ALL", job_wait=True)
+ elif (slot_id and slot_uri) is not None:
+ msg = "Successfully updated user account."
+ payload = get_payload(module, slot_id, action="update")
+ xml_payload, json_payload = convert_payload_xml(payload)
+ value = compare_payload(json_payload, user_attr)
+ if module.check_mode:
+ if value:
+ module.exit_json(msg="Changes found to commit!", changed=True)
+ module.exit_json(msg="No changes found to commit!")
+ if not value:
+ module.exit_json(msg="Requested changes are already present in the user slot.")
+ if generation >= 14:
+ response = idrac.invoke_request(ATTRIBUTE_URI, "PATCH", data={"Attributes": payload})
+ elif generation < 14:
+ time.sleep(10)
+ response = idrac.import_scp(import_buffer=xml_payload, target="ALL", job_wait=True)
+ elif (slot_id and slot_uri and empty_slot_id and empty_slot_uri) is None:
+ module.fail_json(msg="Maximum number of users reached. Delete a user account and retry the operation.")
+ return response, msg
+
+
+def remove_user_account(module, idrac, slot_uri, slot_id):
+ """
+ Remove a user account by passing an empty payload for the slot.
+ :param module: user account module arguments.
+ :param idrac: idrac object.
+ :param slot_uri: user slot uri.
+ :param slot_id: user slot id.
+ :return: json.
+ """
+ response, msg = {}, "Successfully deleted user account."
+ payload = get_payload(module, slot_id, action="delete")
+ xml_payload, json_payload = convert_payload_xml(payload)
+ if module.check_mode and (slot_id and slot_uri) is not None:
+ module.exit_json(msg="Changes found to commit!", changed=True)
+ elif module.check_mode and (slot_uri and slot_id) is None:
+ module.exit_json(msg="No changes found to commit!")
+ elif not module.check_mode and (slot_uri and slot_id) is not None:
+ time.sleep(10)
+ response = idrac.import_scp(import_buffer=xml_payload, target="ALL", job_wait=True)
+ else:
+ module.exit_json(msg="The user account is absent.")
+ return response, msg
+
+
+def main():
+ specs = {
+ "state": {"required": False, "choices": ['present', 'absent'], "default": "present"},
+ "new_user_name": {"required": False},
+ "user_name": {"required": True},
+ "user_password": {"required": False, "no_log": True},
+ "privilege": {"required": False, "choices": ['Administrator', 'ReadOnly', 'Operator', 'None']},
+ "ipmi_lan_privilege": {"required": False, "choices": ['Administrator', 'Operator', 'User', 'No Access']},
+ "ipmi_serial_privilege": {"required": False, "choices": ['Administrator', 'Operator', 'User', 'No Access']},
+ "enable": {"required": False, "type": "bool"},
+ "sol_enable": {"required": False, "type": "bool"},
+ "protocol_enable": {"required": False, "type": "bool"},
+ "authentication_protocol": {"required": False, "choices": ['SHA', 'MD5', 'None']},
+ "privacy_protocol": {"required": False, "choices": ['AES', 'DES', 'None']},
+ }
+ specs.update(idrac_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ supports_check_mode=True)
+ try:
+ with iDRACRedfishAPI(module.params, req_session=True) as idrac:
+ user_attr, slot_uri, slot_id, empty_slot_id, empty_slot_uri = get_user_account(module, idrac)
+ if module.params["state"] == "present":
+ response, message = create_or_modify_account(module, idrac, slot_uri, slot_id, empty_slot_id,
+ empty_slot_uri, user_attr)
+ elif module.params["state"] == "absent":
+ response, message = remove_user_account(module, idrac, slot_uri, slot_id)
+ error = response.json_data.get("error")
+ oem = response.json_data.get("Oem")
+ if oem:
+ oem_msg = oem.get("Dell").get("Message")
+ error_msg = ["Unable to complete application of configuration profile values.",
+ "Import of Server Configuration Profile operation completed with errors."]
+ if oem_msg in error_msg:
+ module.fail_json(msg=oem_msg, error_info=response.json_data)
+ if error:
+ module.fail_json(msg=error.get("message"), error_info=response.json_data)
+ module.exit_json(msg=message, status=response.json_data, changed=True)
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
+ ImportError, ValueError, TypeError, SSLError) as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py
new file mode 100644
index 000000000..ac22541eb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py
@@ -0,0 +1,468 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 6.3.0
+# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_virtual_media
+short_description: Configure the Remote File Share settings.
+version_added: "6.3.0"
+description:
+ - This module allows to configure Remote File Share settings.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ virtual_media:
+ required: true
+ type: list
+ elements: dict
+ description: Details of the Remote File Share.
+ suboptions:
+ insert:
+ required: true
+ type: bool
+ description:
+ - C(True) connects the remote image file.
+ - C(False) ejects the remote image file if connected.
+ image:
+ type: path
+ description:
+ - The path of the image file. The supported file types are .img and .iso.
+ - The file name with .img extension is redirected as a virtual floppy and a file name with .iso extension is
+ redirected as a virtual CDROM.
+ - This option is required when I(insert) is C(True).
+ - "The following are the examples of the share location:
+ CIFS share: //192.168.0.1/file_path/image_name.iso,
+ NFS share: 192.168.0.2:/file_path/image_name.img,
+ HTTP share: http://192.168.0.3/file_path/image_name.iso,
+ HTTPS share: https://192.168.0.4/file_path/image_name.img"
+ - CIFS share is not supported by iDRAC7 and iDRAC8.
+ - HTTPS share with credentials is not supported by iDRAC7 and iDRAC8.
+ index:
+ type: int
+ description:
+ - Index of the Remote File Share. For example, to specify the Remote File Share 1, the value of I(index)
+ should be 1. If I(index) is not specified, the order of I(virtual_media) list will be considered.
+ domain:
+ type: str
+ description: Domain name of network share. This option is applicable for CIFS and HTTPS share.
+ username:
+ type: str
+ description: Network share username. This option is applicable for CIFS and HTTPS share.
+ password:
+ type: str
+ description:
+ - Network share password. This option is applicable for CIFS and HTTPS share.
+ - This module always reports as the changes found when I(password) is provided.
+ media_type:
+ type: str
+ description: Type of the image file. This is applicable when I(insert) is C(True).
+ choices: [CD, DVD, USBStick]
+ force:
+ type: bool
+ description: C(True) ejects the image file if already connected and inserts the file provided in I(image).
+ This is applicable when I(insert) is C(True).
+ default: false
+ resource_id:
+ type: str
+ description: Resource id of the iDRAC, if not specified manager collection id will be used.
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports C(check_mode).
+"""
+
+
+EXAMPLES = """
+---
+- name: Insert image file to Remote File Share 1 using CIFS share.
+ dellemc.openmanage.idrac_virtual_media:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ virtual_media:
+ - insert: true
+ image: "//192.168.0.2/file_path/file.iso"
+ username: "username"
+ password: "password"
+
+- name: Insert image file to Remote File Share 2 using NFS share.
+ dellemc.openmanage.idrac_virtual_media:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ virtual_media:
+ - index: 2
+ insert: true
+ image: "192.168.0.4:/file_path/file.iso"
+
+- name: Insert image file to Remote File Share 1 and 2 using HTTP.
+ dellemc.openmanage.idrac_virtual_media:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ force: true
+ virtual_media:
+ - index: 1
+ insert: true
+ image: "http://192.168.0.4/file_path/file.img"
+ - index: 2
+ insert: true
+ image: "http://192.168.0.4/file_path/file.img"
+
+- name: Insert image file using HTTPS.
+ dellemc.openmanage.idrac_virtual_media:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ force: true
+ virtual_media:
+ - index: 1
+ insert: true
+ image: "https://192.168.0.5/file_path/file.img"
+ username: username
+ password: password
+
+- name: Eject multiple virtual media.
+ dellemc.openmanage.idrac_virtual_media:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ force: true
+ virtual_media:
+ - index: 1
+ insert: false
+ - index: 2
+ insert: false
+
+- name: Ejection of image file from Remote File Share 1.
+ dellemc.openmanage.idrac_virtual_media:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ force: true
+    virtual_media:
+      - insert: false
+
+- name: Insertion and ejection of image file in single task.
+ dellemc.openmanage.idrac_virtual_media:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ force: true
+ virtual_media:
+ - index: 1
+ insert: true
+ image: https://192.168.0.5/file/file.iso
+ username: username
+ password: password
+ - index: 2
+ insert: false
+"""
+
+
+RETURN = r'''
+---
+msg:
+ description: Successfully performed the virtual media operation.
+ returned: success
+ type: str
+ sample: Successfully performed the virtual media operation.
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+
+import json
+import copy
+import time
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+
+MANAGER_BASE = "/redfish/v1/Managers/iDRAC.Embedded.1/VirtualMedia"
+SYSTEM_BASE = "/redfish/v1/Systems/System.Embedded.1/VirtualMedia"
+
+EXCEEDED_ERROR = "Unable to complete the operation because the virtual media settings " \
+ "provided exceeded the maximum limit."
+NO_CHANGES_FOUND = "No changes found to be applied."
+CHANGES_FOUND = "Changes found to be applied."
+INVALID_INDEX = "Unable to compete the virtual media operation because the index provided is incorrect or invalid."
+FAIL_MSG = "Unable to complete the virtual media operation."
+SUCCESS_MSG = "Successfully performed the virtual media operation."
+UNSUPPORTED_IMAGE = "Unable to complete the virtual media operation because unsupported image " \
+ "provided. The supported file types are .img and .iso."
+UNSUPPORTED_MEDIA = "Unable to complete the virtual media operation because unsupported media type " \
+ "provided for index {0}"
+UNSUPPORTED_MSG = "The system does not support the CIFS network share feature."
+UNSUPPORTED_MSG_HTTPS = "The system does not support the HTTPS network share feature with credentials."
+
+
+def get_virtual_media_info(idrac):
+    """Return the virtual media members, the resource scope and the Redfish version.
+
+    Queries the service root for RedfishVersion and then expands the virtual
+    media collection under either the System resource (newer firmware) or the
+    Manager resource (older firmware).
+
+    :param idrac: open iDRACRedfishAPI session.
+    :return: tuple of (members list, "system" or "manager", dotless version string).
+    """
+    resp = idrac.invoke_request("/redfish/v1/", "GET")
+    redfish_version = resp.json_data["RedfishVersion"]
+    # NOTE(review): stripping dots and comparing as int assumes a fixed number
+    # of digits per component (e.g. "1.13.1" -> 1131 vs "1.4.0" -> 140, which
+    # compare on different scales) -- TODO confirm across firmware versions.
+    rd_version = redfish_version.replace(".", "")
+    if 1131 <= int(rd_version):
+        vr_id = "system"
+        member_resp = idrac.invoke_request("{0}?$expand=*($levels=1)".format(SYSTEM_BASE), "GET")
+    else:
+        vr_id = "manager"
+        member_resp = idrac.invoke_request("{0}?$expand=*($levels=1)".format(MANAGER_BASE), "GET")
+    response = member_resp.json_data["Members"]
+    return response, vr_id, rd_version
+
+
+def get_payload_data(each, vr_members, vr_id):
+    """Compare one requested virtual media entry with the device's current state.
+
+    Builds the insert/eject payload for a single I(virtual_media) entry and
+    decides whether an operation is needed by symmetric-differencing the
+    existing attributes against the requested ones.
+
+    :param each: normalized module input entry (index already resolved).
+    :param vr_members: current virtual media members from the device.
+    :param vr_id: "system" or "manager" resource scope.
+    :return: tuple of (change needed, request payload, target member,
+        index with an unsupported media_type or None).
+    """
+    is_change, unsup_media, input_vr_mem = False, None, {}
+    # Module indices are 1-based; the members list is 0-based.
+    vr_mem = vr_members[each["index"] - 1]
+
+    if each["insert"]:
+        exist_vr_mem = dict((k, vr_mem[k]) for k in ["Inserted", "Image", "UserName", "Password"] if vr_mem.get(k) is not None)
+        input_vr_mem = {"Inserted": each["insert"], "Image": each["image"]}
+        # Credentials only apply to CIFS (//host/...) and HTTPS shares.
+        if each["image"].startswith("//") or each["image"].lower().startswith("https://"):
+            username, password, domain = each.get("username"), each.get("password"), each.get("domain")
+            if username is not None:
+                if domain is not None:
+                    username = "{0}\\{1}".format(domain, username)
+                input_vr_mem["UserName"] = username
+            if password is not None:
+                input_vr_mem["Password"] = password
+            else:
+                # No password requested: ignore existing credentials when diffing.
+                exist_vr_mem.pop("UserName", None)
+                exist_vr_mem.pop("Password", None)
+
+        # Media type must match the image extension (.iso for CD/DVD, .img for USBStick).
+        inp_mt = each.get("media_type")
+        if inp_mt is not None and inp_mt == "CD" and input_vr_mem["Image"][-4:].lower() != ".iso":
+            unsup_media = each["index"]
+        if inp_mt is not None and inp_mt == "DVD" and input_vr_mem["Image"][-4:].lower() != ".iso":
+            unsup_media = each["index"]
+        if inp_mt is not None and inp_mt == "USBStick" and input_vr_mem["Image"][-4:].lower() != ".img":
+            unsup_media = each["index"]
+
+        # Any attribute difference means an operation is required.
+        is_change = bool(set(exist_vr_mem.items()) ^ set(input_vr_mem.items()))
+    else:
+        if vr_id == "manager":
+            # Manager scope: eject targets the first member whose Inserted
+            # state differs from the request.
+            for vr_v in vr_members:
+                exist_vr_mem = dict((k, vr_v[k]) for k in ["Inserted"])
+                input_vr_mem = {"Inserted": each.get("insert")}
+                is_change = bool(set(exist_vr_mem.items()) ^ set(input_vr_mem.items()))
+                if is_change:
+                    vr_mem = vr_v
+                    break
+        else:
+            exist_vr_mem = dict((k, vr_mem[k]) for k in ["Inserted"])
+            input_vr_mem = {"Inserted": each.get("insert")}
+            is_change = bool(set(exist_vr_mem.items()) ^ set(input_vr_mem.items()))
+
+    return is_change, input_vr_mem, vr_mem, unsup_media
+
+
+def _validate_params(module, vr_members, rd_version):
+    """Validate one virtual_media entry against share-type constraints.
+
+    Fails the module when CIFS credentials are missing, or when older firmware
+    does not support CIFS or HTTPS-with-credentials shares.  Also normalises
+    backslash-style CIFS paths to forward slashes in place (mutates
+    ``vr_members``).
+
+    :param module: AnsibleModule instance used for failure reporting.
+    :param vr_members: a single (mutable) virtual_media entry dict.
+    :param rd_version: dotless Redfish version string from the service root.
+    """
+    image = vr_members.get("image")
+    if image is not None and (image.startswith("//") or image.startswith("\\\\")):
+        if vr_members.get("username") is None or vr_members.get("password") is None:
+            module.fail_json(msg="CIFS share required username and password.")
+    if image is not None and image.startswith("\\\\"):
+        vr_members["image"] = image.replace("\\", "/")
+    # NOTE(review): 140 corresponds to Redfish "1.4.0" after dot-stripping;
+    # like get_virtual_media_info, this integer compare assumes single-digit
+    # version components -- TODO confirm intended firmware cutoff.
+    if 140 >= int(rd_version) and image is not None:
+        if (vr_members.get("username") is not None or vr_members.get("password") is not None) and \
+                image.startswith("https://"):
+            module.fail_json(msg=UNSUPPORTED_MSG_HTTPS)
+        elif image.startswith("\\\\") or image.startswith("//"):
+            module.fail_json(msg=UNSUPPORTED_MSG)
+
+
+def virtual_media_operation(idrac, module, payload, vr_id):
+    """Apply the queued insert/eject operations to the device.
+
+    For each queued item, ejects and/or inserts media via the Redfish
+    VirtualMedia actions, sleeping 5 seconds after each eject so the firmware
+    settles.  On a manager-scope device, a VRM0021/VRM0012 error triggers a
+    retry against the sibling device (CD <-> RemovableDisk) because the image
+    type determines which slot the firmware accepts.
+
+    :param payload: list of dicts with "payload" (request body), "vr_mem"
+        (current member with its action targets) and "input" (module entry).
+    :return: list of collected error payloads; empty on full success.
+    """
+    # NOTE(review): 'inserted' is never used; the force and non-force branches
+    # below are currently identical (both re-insert when media is already
+    # inserted) -- TODO confirm whether the non-force path was meant to differ.
+    err_payload, inserted = [], []
+    force = module.params["force"]
+
+    for i in payload:
+        try:
+            if force and i["vr_mem"]["Inserted"] and i["payload"]["Inserted"]:
+                idrac.invoke_request(i["vr_mem"]["Actions"]["#VirtualMedia.EjectMedia"]["target"],
+                                     "POST", data="{}", dump=False)
+                time.sleep(5)
+                idrac.invoke_request(i["vr_mem"]["Actions"]["#VirtualMedia.InsertMedia"]["target"],
+                                     "POST", data=i["payload"])
+            elif not force and i["vr_mem"]["Inserted"] and i["payload"]["Inserted"]:
+                idrac.invoke_request(i["vr_mem"]["Actions"]["#VirtualMedia.EjectMedia"]["target"],
+                                     "POST", data="{}", dump=False)
+                time.sleep(5)
+                idrac.invoke_request(i["vr_mem"]["Actions"]["#VirtualMedia.InsertMedia"]["target"],
+                                     "POST", data=i["payload"])
+            elif not i["vr_mem"]["Inserted"] and i["payload"]["Inserted"]:
+                idrac.invoke_request(i["vr_mem"]["Actions"]["#VirtualMedia.InsertMedia"]["target"],
+                                     "POST", data=i["payload"])
+            elif i["vr_mem"]["Inserted"] and not i["payload"]["Inserted"]:
+                idrac.invoke_request(i["vr_mem"]["Actions"]["#VirtualMedia.EjectMedia"]["target"],
+                                     "POST", data="{}", dump=False)
+                time.sleep(5)
+        except Exception as err:
+            # NOTE(review): json.load(err) assumes err is an HTTPError-like
+            # object with a readable JSON body; any other exception type would
+            # raise here -- TODO confirm only HTTPError is expected.
+            error = json.load(err).get("error")
+            if vr_id == "manager":
+                msg_id = error["@Message.ExtendedInfo"][0]["MessageId"]
+                if "VRM0021" in msg_id or "VRM0012" in msg_id:
+                    # Retry against the sibling virtual device slot.
+                    uri = i["vr_mem"]["Actions"]["#VirtualMedia.EjectMedia"]["target"]
+                    if "RemovableDisk" in uri:
+                        uri = uri.replace("RemovableDisk", "CD")
+                    elif "CD" in uri:
+                        uri = uri.replace("CD", "RemovableDisk")
+                    idrac.invoke_request(uri, "POST", data="{}", dump=False)
+                    time.sleep(5)
+                    idrac.invoke_request(i["vr_mem"]["Actions"]["#VirtualMedia.InsertMedia"]["target"],
+                                         "POST", data=i["payload"])
+                else:
+                    err_payload.append(error)
+            else:
+                err_payload.append(error)
+    return err_payload
+
+
+def virtual_media(idrac, module, vr_members, vr_id, rd_version):
+    """Normalize indices, diff requested vs. current state and run operations.
+
+    Works on a deep copy of the I(virtual_media) input so index normalization
+    does not leak back into module.params.  Exits early in check mode or when
+    no changes are required.
+
+    :return: error payload list from virtual_media_operation (empty on success).
+    """
+    vr_input = module.params["virtual_media"]
+    vr_input_copy = copy.deepcopy(vr_input)
+    vr_index, invalid_idx, manager_idx = [], [], 0
+
+    for idx, value in enumerate(vr_input_copy, start=1):
+        if vr_id == "manager":
+            # Manager scope has fixed slots: .img -> slot 1 (floppy),
+            # .iso -> slot 2 (CD).
+            if value.get("index") is not None:
+                manager_idx = value["index"]
+            if value.get("image") is not None and value.get("image")[-4:] == ".img":
+                value["index"] = 1
+            elif value.get("image") is not None and value.get("image")[-4:] == ".iso":
+                value["index"] = 2
+            elif not value["insert"] and value["index"] is None:
+                value["index"] = idx
+        else:
+            # System scope: default the index to the list position.
+            if value.get("index") is None:
+                value["index"] = idx
+            if value["index"] == 0:
+                invalid_idx.append(value["index"])
+            vr_index.append(value["index"])
+
+        _validate_params(module, value, rd_version)
+
+    # NOTE(review): max(vr_index) is evaluated even when vr_id == "manager"
+    # (vr_index stays empty there), which would raise ValueError and surface
+    # through main()'s generic handler -- TODO confirm manager path inputs.
+    if ((len(set(vr_index)) != len(vr_index)) or (len(vr_members) < max(vr_index)) or invalid_idx) and vr_id == "system":
+        module.fail_json(msg=INVALID_INDEX)
+    if (vr_id == "manager") and (1 < manager_idx):
+        module.fail_json(msg=INVALID_INDEX)
+    payload, unsupported_media = [], []
+    for each in vr_input_copy:
+
+        is_change, ret_payload, action, unsup_media = get_payload_data(each, vr_members, vr_id)
+        if unsup_media is not None:
+            unsupported_media.append(unsup_media)
+        # force always re-applies an insert even when no diff was detected.
+        if module.params["force"] and not is_change and each["insert"]:
+            is_change = True
+        if is_change:
+            payload.append({"payload": ret_payload, "vr_mem": action, "input": each})
+
+    if unsupported_media:
+        if vr_id == "manager":
+            module.fail_json(msg=UNSUPPORTED_MEDIA.format("1"))
+        module.fail_json(msg=UNSUPPORTED_MEDIA.format(", ".join(list(map(str, unsupported_media)))))
+
+    # Check-mode and idempotency exits.
+    if module.check_mode and payload:
+        module.exit_json(msg=CHANGES_FOUND, changed=True)
+    elif module.check_mode and not payload:
+        module.exit_json(msg=NO_CHANGES_FOUND)
+    elif not module.check_mode and not payload:
+        module.exit_json(msg=NO_CHANGES_FOUND)
+
+    status = virtual_media_operation(idrac, module, payload, vr_id)
+
+    return status
+
+
+def _validate_image_format(module):
+ unsup_image = False
+ for each in module.params["virtual_media"]:
+ if each["insert"] and each.get("image") is not None and each.get("image")[-4:].lower() not in [".iso", ".img"]:
+ unsup_image = True
+ if unsup_image:
+ module.fail_json(msg=UNSUPPORTED_IMAGE)
+
+
+def main():
+    """Module entry point: parse arguments, open an iDRAC session and run the
+    requested virtual media operations."""
+    specs = {
+        "virtual_media": {
+            "required": True, "type": "list", "elements": "dict",
+            "options": {
+                "insert": {"required": True, "type": "bool"},
+                "image": {"required": False, "type": "path"},
+                "index": {"required": False, "type": "int"},
+                "domain": {"required": False, "type": "str"},
+                "username": {"required": False, "type": "str"},
+                "password": {"required": False, "type": "str", "no_log": True},
+                "media_type": {"required": False, "type": "str", "choices": ["CD", "DVD", "USBStick"]},
+            },
+            "required_if": [["insert", True, ("image", )]],
+        },
+        "force": {"required": False, "type": "bool", "default": False},
+        # NOTE(review): resource_id is accepted but not referenced anywhere in
+        # this module's visible code -- TODO confirm it is consumed elsewhere.
+        "resource_id": {"required": False, "type": 'str'},
+    }
+    specs.update(idrac_auth_params)
+    module = AnsibleModule(argument_spec=specs, supports_check_mode=True)
+    try:
+        with iDRACRedfishAPI(module.params, req_session=True) as idrac:
+            vr_media = module.params["virtual_media"]
+            vr_members, vr_id, rd_version = get_virtual_media_info(idrac)
+            # More requested entries than device slots is an immediate failure.
+            if (len(vr_media) > len(vr_members) and vr_id == "system") or \
+                    (len(vr_media) > 1 and vr_id == "manager"):
+                module.fail_json(msg=EXCEEDED_ERROR)
+            _validate_image_format(module)
+            resp = virtual_media(idrac, module, vr_members, vr_id, rd_version)
+            if resp:
+                module.fail_json(msg=FAIL_MSG, error_info=resp)
+            module.exit_json(msg=SUCCESS_MSG, changed=True)
+    except HTTPError as err:
+        module.fail_json(msg=str(err), error_info=json.load(err))
+    except URLError as err:
+        # Unreachable host is reported as unreachable, not failed.
+        module.exit_json(msg=str(err), unreachable=True)
+    except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
+            ImportError, ValueError, TypeError) as err:
+        module.fail_json(msg=str(err))
+
+
+# Module entry point when executed as a script.
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py
new file mode 100644
index 000000000..98235b9d3
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py
@@ -0,0 +1,457 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_active_directory
+short_description: Configure Active Directory groups to be used with Directory Services
+description: "This module allows to add, modify, and delete OpenManage Enterprise connection with Active Directory
+Service."
+version_added: "4.0.0"
+author:
+ - Jagadeesh N V(@jagadeeshnv)
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ domain_server:
+ type: list
+ elements: str
+ description:
+ - Enter the domain name or FQDN or IP address of the domain controller.
+ - If I(domain_controller_lookup) is C(DNS), enter the domain name to query DNS for the domain controllers.
+ - "If I(domain_controller_lookup) is C(MANUAL), enter the FQDN or the IP address of the domain controller.
+ The maximum number of Active Directory servers that can be added is three."
+ domain_controller_lookup:
+ type: str
+ description:
+ - Select the Domain Controller Lookup method.
+ choices:
+ - DNS
+ - MANUAL
+ default: DNS
+ domain_controller_port:
+ type: int
+ description:
+ - Domain controller port.
+ - By default, Global Catalog Address port number 3269 is populated.
+ - For the Domain Controller Access, enter 636 as the port number.
+ - C(NOTE), Only LDAPS ports are supported.
+ default: 3269
+ group_domain:
+ type: str
+ description:
+ - Provide the group domain in the format C(example.com) or C(ou=org, dc=example, dc=com).
+ id:
+ type: int
+ description:
+ - Provide the ID of the existing Active Directory service connection.
+ - This is applicable for modification and deletion.
+ - This is mutually exclusive with I(name).
+ name:
+ type: str
+ description:
+ - Provide a name for the Active Directory connection.
+ - This is applicable for creation and deletion.
+      - This is mutually exclusive with I(id).
+ network_timeout:
+ type: int
+ description:
+ - Enter the network timeout duration in seconds.
+ - The supported timeout duration range is 15 to 300 seconds.
+ default: 120
+ search_timeout:
+ type: int
+ description:
+ - Enter the search timeout duration in seconds.
+ - The supported timeout duration range is 15 to 300 seconds.
+ default: 120
+ state:
+ type: str
+ description:
+ - C(present) allows to create or modify an Active Directory service.
+      - C(absent) allows to delete an Active Directory service.
+ choices:
+ - present
+ - absent
+ default: present
+ test_connection:
+ type: bool
+ description:
+ - Enables testing the connection to the domain controller.
+ - The connection to the domain controller is tested with the provided Active Directory service details.
+ - If test fails, module will error out.
+      - If C(yes), I(domain_username) and I(domain_password) have to be provided.
+ default: no
+ domain_password:
+ type: str
+ description:
+ - Provide the domain password.
+ - This is applicable when I(test_connection) is C(yes).
+ domain_username:
+ type: str
+ description:
+ - Provide the domain username either in the UPN (username@domain) or NetBIOS (domain\\\\username) format.
+ - This is applicable when I(test_connection) is C(yes).
+ validate_certificate:
+ type: bool
+ description:
+ - Enables validation of SSL certificate of the domain controller.
+ - The module will always report change when this is C(yes).
+ default: no
+ certificate_file:
+ type: path
+ description:
+ - Provide the full path of the SSL certificate.
+ - The certificate should be a Root CA Certificate encoded in Base64 format.
+ - This is applicable when I(validate_certificate) is C(yes).
+requirements:
+ - "python >= 3.8.6"
+notes:
+ - The module will always report change when I(validate_certificate) is C(yes).
+ - Run this module from a system that has direct access to OpenManage Enterprise.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Add Active Directory service using DNS lookup along with the test connection
+ dellemc.openmanage.ome_active_directory:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: my_ad1
+ domain_server:
+ - domainname.com
+ group_domain: domainname.com
+ test_connection: yes
+ domain_username: user@domainname
+ domain_password: domain_password
+
+- name: Add Active Directory service using IP address of the domain controller with certificate validation
+ dellemc.openmanage.ome_active_directory:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: my_ad2
+ domain_controller_lookup: MANUAL
+ domain_server:
+ - 192.68.20.181
+ group_domain: domainname.com
+ validate_certificate: yes
+ certificate_file: "/path/to/certificate/file.cer"
+
+- name: Modify domain controller IP address, network_timeout and group_domain
+ dellemc.openmanage.ome_active_directory:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: my_ad2
+ domain_controller_lookup: MANUAL
+ domain_server:
+ - 192.68.20.189
+ group_domain: newdomain.in
+ network_timeout: 150
+
+- name: Delete Active Directory service
+ dellemc.openmanage.ome_active_directory:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: my_ad2
+ state: absent
+
+- name: Test connection to existing Active Directory service with certificate validation
+ dellemc.openmanage.ome_active_directory:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: my_ad2
+ test_connection: yes
+ domain_username: user@domainname
+ domain_password: domain_password
+ validate_certificate: yes
+ certificate_file: "/path/to/certificate/file.cer"
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall status of the Active Directory operation.
+ returned: always
+  sample: "Successfully added the Active Directory service."
+active_directory:
+ type: dict
+ description: The Active Directory that was added, modified or deleted by this module.
+ returned: on change
+ sample: {
+ "Name": "ad_test",
+ "Id": 21789,
+ "ServerType": "MANUAL",
+ "ServerName": ["192.168.20.181"],
+ "DnsServer": [],
+ "GroupDomain": "dellemcdomain.com",
+ "NetworkTimeOut": 120,
+ "Password": null,
+ "SearchTimeOut": 120,
+ "ServerPort": 3269,
+ "CertificateValidation": false
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error_info": {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to connect to the LDAP or AD server because the entered credentials are invalid.",
+ "MessageArgs": [],
+ "MessageId": "CSEC5002",
+ "RelatedProperties": [],
+ "Resolution": "Make sure the server input configuration are valid and retry the operation.",
+ "Severity": "Critical"
+ }
+ ],
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information."
+ }
+ }
+ }
+"""
+
+import json
+import os
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.common.dict_transformations import recursive_diff
+
+AD_URI = "AccountService/ExternalAccountProvider/ADAccountProvider"
+TEST_CONNECTION = "AccountService/ExternalAccountProvider/Actions/ExternalAccountProvider.TestADConnection"
+DELETE_AD = "AccountService/ExternalAccountProvider/Actions/ExternalAccountProvider.DeleteExternalAccountProvider"
+NO_CHANGES_MSG = "No changes found to be applied."
+CHANGES_FOUND = "Changes found to be applied."
+MAX_AD_MSG = "Unable to add the account provider because the maximum number of configurations allowed for an" \
+ " Active Directory service is {0}."
+CREATE_SUCCESS = "Successfully added the Active Directory service."
+MODIFY_SUCCESS = "Successfully modified the Active Directory service."
+DELETE_SUCCESS = "Successfully deleted the Active Directory service."
+DOM_SERVER_MSG = "Specify the domain server. Domain server is required to create an Active Directory service."
+GRP_DOM_MSG = "Specify the group domain. Group domain is required to create an Active Directory service."
+CERT_INVALID = "The provided certificate file path is invalid or not readable."
+DOMAIN_ALLOWED_COUNT = "Maximum entries allowed for {0} lookup type is {1}."
+TEST_CONNECTION_SUCCESS = "Test Connection is successful. "
+TEST_CONNECTION_FAIL = "Test Connection has failed. "
+ERR_READ_FAIL = "Unable to retrieve the error details."
+INVALID_ID = "The provided Active Directory ID is invalid."
+TIMEOUT_RANGE = "The {0} value is not in the range of {1} to {2}."
+MAX_AD = 2
+MIN_TIMEOUT = 15
+MAX_TIMEOUT = 300
+
+
+def get_ad(module, rest_obj):
+ ad = {}
+ prm = module.params
+ resp = rest_obj.invoke_request('GET', AD_URI)
+ ad_list = resp.json_data.get('value')
+ ad_cnt = len(ad_list)
+ ky = 'Name'
+ vl = 'name'
+ if prm.get('id'):
+ ky = 'Id'
+ vl = 'id'
+ for adx in ad_list:
+ if str(adx.get(ky)).lower() == str(prm.get(vl)).lower():
+ ad = adx
+ break
+ return ad, ad_cnt
+
+
+def test_http_error_fail(module, err):
+ try:
+ error_info = json.load(err)
+ err_list = error_info.get('error', {}).get('@Message.ExtendedInfo', [ERR_READ_FAIL])
+ if err_list:
+ err_rsn = err_list[0].get("Message")
+ except Exception:
+ err_rsn = ERR_READ_FAIL
+ module.fail_json(msg="{0}{1}".format(TEST_CONNECTION_FAIL, err_rsn), error_info=error_info)
+
+
+def test_connection(module, rest_obj, create_payload):
+ try:
+ create_payload['UserName'] = module.params.get('domain_username')
+ create_payload['Password'] = module.params.get('domain_password')
+ rest_obj.invoke_request('POST', TEST_CONNECTION, data=create_payload,
+ api_timeout=create_payload['NetworkTimeOut'])
+ create_payload.pop('UserName', None)
+ create_payload.pop('Password', None)
+ except HTTPError as err:
+ test_http_error_fail(module, err)
+ except SSLError as err:
+ module.fail_json(msg="{0}{1}".format(TEST_CONNECTION_FAIL, str(err)))
+ except Exception as err:
+ module.fail_json(msg="{0}{1}".format(TEST_CONNECTION_FAIL, str(err)))
+
+
+def make_payload(prm):
+ dc_type = {'DNS': 'DnsServer', 'MANUAL': 'ServerName'}
+ tmplt_ad = {'name': 'Name', 'domain_controller_port': 'ServerPort', 'domain_controller_lookup': 'ServerType',
+ 'domain_server': dc_type[prm.get('domain_controller_lookup')], 'group_domain': 'GroupDomain',
+ 'network_timeout': 'NetworkTimeOut', 'search_timeout': 'SearchTimeOut',
+ 'validate_certificate': 'CertificateValidation'}
+ payload = dict([(v, prm.get(k)) for k, v in tmplt_ad.items() if prm.get(k) is not None])
+ return payload
+
+
+def validate_n_testconnection(module, rest_obj, payload):
+    """Validate the payload limits, attach the certificate and optionally test.
+
+    Enforces the per-lookup-type server count (1 for DNS, 3 for MANUAL) and
+    the 15-300 second timeout range, reads the certificate file into the
+    payload when certificate validation is enabled (mutates ``payload``), and
+    runs the connection test when requested.
+
+    :return: message prefix ("" or TEST_CONNECTION_SUCCESS) for the caller's
+        exit message.
+    """
+    dc_cnt = {'DNS': 1, 'MANUAL': 3}
+    dc_type = {'DNS': 'DnsServer', 'MANUAL': 'ServerName'}
+    dc_lookup = payload.get('ServerType')
+    if len(payload.get(dc_type[dc_lookup])) > dc_cnt[dc_lookup]:
+        module.fail_json(msg=DOMAIN_ALLOWED_COUNT.format(dc_lookup, dc_cnt[dc_lookup]))
+    t_list = ['NetworkTimeOut', 'SearchTimeOut']
+    for tx in t_list:
+        if payload.get(tx) not in range(MIN_TIMEOUT, MAX_TIMEOUT + 1):
+            module.fail_json(msg=TIMEOUT_RANGE.format(tx, MIN_TIMEOUT, MAX_TIMEOUT))
+    payload['CertificateFile'] = ""
+    if payload.get('CertificateValidation'):
+        cert_path = module.params.get('certificate_file')
+        # NOTE(review): only existence is checked; an unreadable file raises
+        # IOError which main() converts to a failure -- matches CERT_INVALID text.
+        if os.path.exists(cert_path):
+            with open(cert_path, 'r') as certfile:
+                cert_data = certfile.read()
+            payload['CertificateFile'] = cert_data
+        else:
+            module.fail_json(msg=CERT_INVALID)
+    msg = ""
+    if module.params.get('test_connection'):
+        test_connection(module, rest_obj, payload)
+        msg = TEST_CONNECTION_SUCCESS
+    return msg
+
+
+def create_ad(module, rest_obj):
+ prm = module.params
+ if not prm.get('domain_server'):
+ module.fail_json(msg=DOM_SERVER_MSG)
+ if not prm.get('group_domain'):
+ module.fail_json(msg=GRP_DOM_MSG)
+ create_payload = make_payload(prm)
+ msg = validate_n_testconnection(module, rest_obj, create_payload)
+ if module.check_mode:
+ module.exit_json(msg="{0}{1}".format(msg, CHANGES_FOUND), changed=True)
+ resp = rest_obj.invoke_request('POST', AD_URI, data=create_payload)
+ ad = resp.json_data
+ ad.pop('CertificateFile', "")
+ module.exit_json(msg="{0}{1}".format(msg, CREATE_SUCCESS), active_directory=ad, changed=True)
+
+
+def modify_ad(module, rest_obj, ad):
+    """Modify an existing Active Directory service connection.
+
+    Diffs the requested payload against the current provider (server lists
+    are sorted first so order does not count as a change) and PUTs the merged
+    record when a difference exists.  When certificate validation is enabled,
+    a change is always reported, as documented in the module notes.
+    """
+    prm = module.params
+    modify_payload = make_payload(prm)
+    ad = rest_obj.strip_substr_dict(ad)
+    # Sort both server lists so ordering differences are not treated as changes.
+    if ad.get('ServerName'):
+        (ad.get('ServerName')).sort()
+    if modify_payload.get('ServerName'):
+        (modify_payload.get('ServerName')).sort()
+    diff = recursive_diff(modify_payload, ad)
+    is_change = False
+    if diff:
+        if diff[0]:
+            is_change = True
+    # The requested values are merged in unconditionally; is_change alone
+    # decides whether the module reports/performs a modification.
+    ad.update(modify_payload)
+    msg = validate_n_testconnection(module, rest_obj, ad)
+    if not is_change and not ad.get('CertificateValidation'):
+        module.exit_json(msg="{0}{1}".format(msg, NO_CHANGES_MSG), active_directory=ad)
+    if module.check_mode:
+        module.exit_json(msg="{0}{1}".format(msg, CHANGES_FOUND), changed=True)
+    resp = rest_obj.invoke_request('PUT', "{0}({1})".format(AD_URI, ad['Id']), data=ad)
+    ad = resp.json_data
+    ad.pop('CertificateFile', "")
+    module.exit_json(msg="{0}{1}".format(msg, MODIFY_SUCCESS), active_directory=ad, changed=True)
+
+
+def delete_ad(module, rest_obj, ad):
+ ad = rest_obj.strip_substr_dict(ad)
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_FOUND, active_directory=ad, changed=True)
+ resp = rest_obj.invoke_request('POST', DELETE_AD, data={"AccountProviderIds": [int(ad['Id'])]})
+ module.exit_json(msg=DELETE_SUCCESS, active_directory=ad, changed=True)
+
+
+def main():
+    """Module entry point: parse arguments, resolve the existing provider and
+    dispatch to create/modify/delete."""
+    specs = {
+        "state": {"type": 'str', "choices": ["present", "absent"], "default": 'present'},
+        "name": {"type": 'str'},
+        "id": {"type": 'int'},
+        "domain_controller_lookup": {"type": 'str', "choices": ['MANUAL', 'DNS'], "default": 'DNS'},
+        "domain_server": {"type": 'list', "elements": 'str'},
+        "group_domain": {"type": 'str'},
+        "domain_controller_port": {"type": 'int', "default": 3269},
+        "network_timeout": {"type": 'int', "default": 120},
+        "search_timeout": {"type": 'int', "default": 120},
+        "validate_certificate": {"type": 'bool', "default": False},
+        "certificate_file": {"type": 'path'},
+        "test_connection": {"type": 'bool', "default": False},
+        "domain_username": {"type": 'str'},
+        "domain_password": {"type": 'str', "no_log": True}
+    }
+    specs.update(ome_auth_params)
+    module = AnsibleModule(
+        argument_spec=specs,
+        required_one_of=[('name', 'id')],
+        required_if=[
+            ('test_connection', True, ('domain_username', 'domain_password',)),
+            ('validate_certificate', True, ('certificate_file',))],
+        mutually_exclusive=[('name', 'id')],
+        supports_check_mode=True)
+    try:
+        with RestOME(module.params, req_session=True) as rest_obj:
+            ad, ad_cnt = get_ad(module, rest_obj)
+            if module.params.get('state') == 'present':
+                if ad:
+                    modify_ad(module, rest_obj, ad)
+                else:
+                    # An id that matched nothing cannot be created; ids are
+                    # server-assigned.
+                    if module.params.get('id'):
+                        module.fail_json(msg=INVALID_ID)
+                    if ad_cnt < MAX_AD:
+                        create_ad(module, rest_obj)
+                    # create_ad exits the module; reaching here means the
+                    # provider limit was hit.
+                    module.fail_json(msg=MAX_AD_MSG.format(MAX_AD))
+            else:
+                if ad:
+                    delete_ad(module, rest_obj, ad)
+                module.exit_json(msg=NO_CHANGES_MSG)
+    except HTTPError as err:
+        module.fail_json(msg=str(err), error_info=json.load(err))
+    except URLError as err:
+        # Unreachable host is reported as unreachable, not failed.
+        module.exit_json(msg=str(err), unreachable=True)
+    except (
+            IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError,
+            OSError) as err:
+        module.fail_json(msg=str(err))
+
+
+# Module entry point when executed as a script.
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py
new file mode 100644
index 000000000..66a8b26c0
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_application_alerts_smtp
+short_description: This module allows to configure SMTP or email configurations
+version_added: "4.3.0"
+description:
+ - This module allows to configure SMTP or email configurations on OpenManage Enterprise
+ and OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ destination_address:
+ description: The IP address or FQDN of the SMTP destination server.
+ type: str
+ required: true
+ port_number:
+ description: The port number of the SMTP destination server.
+ type: int
+ use_ssl:
+ description: Use SSL to connect with the SMTP server.
+ type: bool
+ enable_authentication:
+ description:
+ - Enable or disable authentication to access the SMTP server.
+ - The I(credentials) are mandatory if I(enable_authentication) is C(True).
+ - The module will always report change when this is C(True).
+ type: bool
+ required: true
+ credentials:
+ description: The credentials for the SMTP server
+ type: dict
+ suboptions:
+ username:
+ description:
+ - The username to access the SMTP server.
+ type: str
+ required: true
+ password:
+ description:
+ - The password to access the SMTP server.
+ type: str
+ required: true
+requirements:
+ - "python >= 3.8.6"
+notes:
+ - The module will always report change when I(enable_authentication) is C(True).
+ - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise
+ or OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+author:
+ - Sachin Apagundi(@sachin-apa)
+'''
+
+EXAMPLES = """
+---
+- name: Update SMTP destination server configuration with authentication
+ dellemc.openmanage.ome_application_alerts_smtp:
+ hostname: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ destination_address: "localhost"
+ port_number: 25
+ use_ssl: true
+ enable_authentication: true
+ credentials:
+ username: "username"
+ password: "password"
+- name: Update SMTP destination server configuration without authentication
+ dellemc.openmanage.ome_application_alerts_smtp:
+ hostname: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ destination_address: "localhost"
+ port_number: 25
+ use_ssl: false
+ enable_authentication: false
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall status of the SMTP settings update.
+ returned: always
+ sample: "Successfully updated the SMTP settings."
+smtp_details:
+ type: dict
+ description: returned when SMTP settings are updated successfully.
+ returned: success
+ sample: {
+ "DestinationAddress": "localhost",
+ "PortNumber": 25,
+ "UseCredentials": true,
+ "UseSSL": false,
+ "Credential": {
+ "User": "admin",
+ "Password": null
+ }
+ }
+
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [{
+ "MessageId": "CAPP1106",
+ "RelatedProperties": [],
+ "Message": "Unable to update the SMTP settings because the entered credential is invalid or empty.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Either enter valid credentials or disable the Use Credentials option and retry the operation."
+ }
+ ]
+ }
+ }
+"""
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.common.dict_transformations import recursive_diff
+
+SUCCESS_MSG = "Successfully updated the SMTP settings."
+SMTP_URL = "AlertService/AlertDestinations/SMTPConfiguration"
+NO_CHANGES = "No changes found to be applied."
+CHANGES_FOUND = "Changes found to be applied."
+
+
def fetch_smtp_settings(rest_obj):
    """Retrieve the current SMTP configuration from the appliance.

    :param rest_obj: open RestOME session used to issue the GET request.
    :return: dict of the first (and only) SMTP configuration entry with the
        OData type annotation removed, so it can be compared and re-posted.
    """
    final_resp = rest_obj.invoke_request("GET", SMTP_URL)
    ret_data = final_resp.json_data.get('value')[0]
    # Drop the OData metadata key; pass a default so a payload that lacks the
    # key (older/newer firmware) does not raise KeyError.
    ret_data.pop("@odata.type", None)
    return ret_data
+
+
def update_smtp_settings(rest_obj, payload):
    """POST *payload* to the SMTP configuration endpoint and return the response."""
    return rest_obj.invoke_request("POST", SMTP_URL, data=payload)
+
+
def update_payload(module, curr_payload):
    """Build the SMTP settings payload to be posted to the appliance.

    Options the user left unset fall back to the value currently configured
    on the appliance (taken from *curr_payload*).  Credentials are attached
    only when supplied and authentication is enabled.
    """
    option_map = (
        ("destination_address", "DestinationAddress"),
        ("enable_authentication", "UseCredentials"),
        ("port_number", "PortNumber"),
        ("use_ssl", "UseSSL"),
    )
    smtp_payload = {attr: get_value(module, curr_payload, opt, attr)
                    for opt, attr in option_map}
    creds = module.params.get("credentials")
    if creds and smtp_payload.get("UseCredentials"):
        smtp_payload["Credential"] = {
            "User": creds.get("username"),
            "Password": creds.get("password")
        }
    return smtp_payload
+
+
def get_value(module, resp, mod_key, attr_key):
    """Return module option *mod_key* if set, else attribute *attr_key* from *resp*.

    Only ``None`` triggers the fallback, so falsy user values (False, 0, "")
    are still honoured.
    """
    value = module.params.get(mod_key)
    return resp.get(attr_key) if value is None else value
+
+
def _diff_payload(curr_resp, update_resp):
    """Return True when *update_resp* differs from the current settings."""
    if not update_resp:
        return False
    delta = recursive_diff(update_resp, curr_resp)
    return bool(delta and delta[0])
+
+
def password_no_log(attributes):
    """Mask the password in a credentials mapping before it is echoed back."""
    if not isinstance(attributes, dict):
        return
    if 'password' in attributes:
        attributes['password'] = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"
+
+
def fail_module(module, **failmsg):
    """Scrub the credential password, then fail the module with *failmsg*."""
    creds = module.params.get("credentials")
    password_no_log(creds)
    module.fail_json(**failmsg)
+
+
def exit_module(module, **existmsg):
    """Scrub the credential password, then exit the module with *existmsg*."""
    creds = module.params.get("credentials")
    password_no_log(creds)
    module.exit_json(**existmsg)
+
+
def process_check_mode(module, diff):
    """Exit early when there is nothing to do or when running in check mode.

    Returns (falls through) only when *diff* is truthy and check mode is off,
    i.e. when the caller should actually perform the update.
    """
    if not diff:
        # Original code had two byte-identical branches for check/non-check
        # mode here; the message is the same either way.
        exit_module(module, msg=NO_CHANGES)
    elif module.check_mode:
        exit_module(module, msg=CHANGES_FOUND, changed=True)
+
+
def main():
    """Entry point: reconcile the requested SMTP settings with the appliance."""
    credentials_options = {"username": {"type": "str", "required": True},
                           "password": {"type": "str", "required": True, "no_log": True}}

    specs = {
        "destination_address": {"required": True, "type": "str"},
        "port_number": {"required": False, "type": "int"},
        "use_ssl": {"required": False, "type": "bool"},
        "enable_authentication": {"required": True, "type": "bool"},
        "credentials":
            {"required": False, "type": "dict",
             "options": credentials_options,
             },
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[['enable_authentication', True, ['credentials']], ],
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            curr_resp = fetch_smtp_settings(rest_obj)
            payload = update_payload(module, curr_resp)
            diff = _diff_payload(curr_resp, payload)
            process_check_mode(module, diff)
            resp = update_smtp_settings(rest_obj, payload)
            exit_module(module, msg=SUCCESS_MSG,
                        smtp_details=resp.json_data, changed=True)

    except HTTPError as err:
        # HTTPError objects are file-like, so the response body can be parsed.
        fail_module(module, msg=str(err), error_info=json.load(err))
    except URLError as err:
        exit_module(module, msg=str(err), unreachable=True)
    except (
            IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError,
            OSError) as err:
        # BUGFIX: these exceptions are not file-like, so the previous
        # json.load(err) raised inside the handler and masked the real error.
        fail_module(module, msg=str(err))


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py
new file mode 100644
index 000000000..12c212450
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py
@@ -0,0 +1,260 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_application_alerts_syslog
+short_description: Configure syslog forwarding settings on OpenManage Enterprise and OpenManage Enterprise Modular
+description: This module allows to configure syslog forwarding settings on OpenManage Enterprise and OpenManage Enterprise Modular.
+version_added: 4.3.0
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ syslog_servers:
+ description: List of servers to forward syslog.
+ type: list
+ elements: dict
+ suboptions:
+ id:
+ description: The ID of the syslog server.
+ type: int
+ choices: [1, 2, 3, 4]
+ required: True
+ enabled:
+ description: Enable or disable syslog forwarding.
+ type: bool
+ destination_address:
+ description:
+ - The IP address, FQDN or hostname of the syslog server.
+ - This is required if I(enabled) is C(True).
+ type: str
+ port_number:
+ description: The UDP port number of the syslog server.
+ type: int
+requirements:
+ - "python >= 3.8.6"
+author:
+ - Jagadeesh N V(@jagadeeshnv)
+notes:
+ - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise or Dell EMC OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Configure single server to forward syslog
+ dellemc.openmanage.ome_application_alerts_syslog:
+ hostname: 192.168.0.1
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ syslog_servers:
+ - id: 1
+ enabled: true
+ destination_address: 192.168.0.2
+ port_number: 514
+
+- name: Configure multiple servers to forward syslog
+ dellemc.openmanage.ome_application_alerts_syslog:
+ hostname: 192.168.0.1
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ syslog_servers:
+ - id: 1
+ port_number: 523
+ - id: 2
+ enabled: true
+ destination_address: sysloghost1.lab.com
+ - id: 3
+ enabled: false
+ - id: 4
+ enabled: true
+ destination_address: 192.168.0.4
+ port_number: 514
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall status of the syslog forwarding operation.
+ returned: always
+ sample: Successfully updated the syslog forwarding settings.
+syslog_details:
+ type: list
+ description: Syslog forwarding settings list applied.
+ returned: on success
+ sample: [
+ {
+ "DestinationAddress": "192.168.10.43",
+ "Enabled": false,
+ "Id": 1,
+ "PortNumber": 514
+ },
+ {
+ "DestinationAddress": "192.168.10.46",
+ "Enabled": true,
+ "Id": 2,
+ "PortNumber": 514
+ },
+ {
+ "DestinationAddress": "192.168.10.44",
+ "Enabled": true,
+ "Id": 3,
+ "PortNumber": 514
+ },
+ {
+ "DestinationAddress": "192.168.10.42",
+ "Enabled": true,
+ "Id": 4,
+ "PortNumber": 515
+ }
+ ]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CAPP1108",
+ "RelatedProperties": [],
+ "Message": "Unable to update the Syslog settings because the request contains an invalid number of
+ configurations. The request must contain no more than 4 configurations but contains 5.",
+ "MessageArgs": [
+ "4",
+ "5"
+ ],
+ "Severity": "Warning",
+ "Resolution": "Enter only the required number of configurations as identified in the message and
+ retry the operation."
+ }
+ ]
+ }
+}
+"""
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.common.dict_transformations import recursive_diff
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+SYSLOG_GET = "AlertService/AlertDestinations/SyslogConfiguration"
+SYSLOG_SET = "AlertService/AlertDestinations/Actions/AlertDestinations.ApplySyslogConfig"
+SUCCESS_MSG = "Successfully updated the syslog forwarding settings."
+DUP_ID_MSG = "Duplicate server IDs are provided."
+NO_CHANGES_MSG = "No changes found to be applied."
+CHANGES_FOUND = "Changes found to be applied."
+SYSLOG_UDP = 514
+
+
def validate_input(module):
    """Index the requested syslog servers by ID as CamelCase payload dicts.

    Exits with NO_CHANGES_MSG when no servers were supplied; fails (via
    exit_json with failed=True) when duplicate server IDs are given.
    """
    server_list = module.params.get("syslog_servers")
    if not server_list:
        module.exit_json(msg=NO_CHANGES_MSG)
    indexed = {}
    for server in server_list:
        pruned = {key: val for key, val in server.items() if val is not None}
        indexed[server.get('id')] = snake_dict_to_camel_dict(pruned, capitalize_first=True)
    if len(indexed) < len(server_list):
        # Two entries collapsed onto the same ID key.
        module.exit_json(msg=DUP_ID_MSG, failed=True)
    return indexed
+
+
def strip_substr_dict(odata_dict, chkstr='@odata.'):
    """Remove OData metadata keys from *odata_dict* in place.

    Also defaults a missing/falsy PortNumber to the standard syslog UDP port.
    Returns the same (mutated) dict for convenience.
    """
    for key in list(odata_dict):
        if chkstr in str(key).lower():
            del odata_dict[key]
    if not odata_dict.get('PortNumber'):
        odata_dict['PortNumber'] = SYSLOG_UDP
    return odata_dict
+
+
def get_current_syslog(rest_obj):
    """Fetch the list of syslog destination configurations from the appliance."""
    return rest_obj.invoke_request("GET", SYSLOG_GET).json_data.get('value')
+
+
def compare_get_payload(module, current_list, input_config):
    """Merge the requested changes into the current syslog configuration.

    :param module: AnsibleModule; used to exit early (no changes / check mode).
    :param current_list: list of configuration dicts from the GET call.
    :param input_config: requested settings keyed by server ID (from validate_input).
    :return: the full, ordered payload list ready to POST.

    Exits with NO_CHANGES_MSG when nothing differs, or with CHANGES_FOUND
    (changed=True) when running in check mode and a difference exists.
    """
    payload_list = [strip_substr_dict(sys) for sys in current_list]  # preserving list order
    current_config = dict([(sys.get('Id'), sys) for sys in payload_list])
    diff = 0  # number of server entries that actually change
    for k, v in current_config.items():
        i_dict = input_config.get(k)
        if i_dict:
            # recursive_diff returns (only-in-first, only-in-second); the first
            # element holds the user-requested values that differ.
            d = recursive_diff(i_dict, v)
            if d and d[0]:
                v.update(d[0])
                diff = diff + 1
        v.pop("Id", None)  # not mandatory
        # NOTE(review): assumes server Id equals its 1-based position in the
        # list — holds for the fixed IDs 1-4 enforced by the argument spec.
        payload_list[int(k) - 1] = v  # The order in list needs to be maintained
    if not diff:
        module.exit_json(msg=NO_CHANGES_MSG)
    if module.check_mode:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    return payload_list
+
+
def main():
    """Entry point: apply syslog forwarding settings on the appliance."""
    syslog_server_options = {
        "id": {"type": 'int', "choices": [1, 2, 3, 4], "required": True},
        "enabled": {"type": 'bool'},
        "destination_address": {"type": 'str'},
        "port_number": {"type": 'int'}
    }
    specs = {
        "syslog_servers": {
            "type": 'list', "elements": 'dict',
            "options": syslog_server_options,
            "required_one_of": [("enabled", "destination_address", "port_number")],
            "required_if": [("enabled", True, ("destination_address",))]
        }
    }
    specs.update(ome_auth_params)

    module = AnsibleModule(
        argument_spec=specs,
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            input_config = validate_input(module)
            current_list = get_current_syslog(rest_obj)
            payload = compare_get_payload(module, current_list, input_config)
            # The apply call averages 50-60 seconds, hence api_timeout=120.
            resp = rest_obj.invoke_request("POST", SYSLOG_SET, data=payload, api_timeout=120)
            module.exit_json(msg=SUCCESS_MSG, syslog_details=resp.json_data, changed=True)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError,
            AttributeError, IndexError, KeyError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py
new file mode 100644
index 000000000..3c9b26994
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_application_certificate
+short_description: This module allows to generate a CSR and upload the certificate
+version_added: "2.1.0"
+description:
+ - This module allows the generation of a new certificate signing request (CSR) and to upload the certificate
+ on OpenManage Enterprise.
+notes:
+ - If a certificate is uploaded, which is identical to an already existing certificate, it is accepted by the module.
+ - This module does not support C(check_mode).
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ command:
+ description: C(generate_csr) allows the generation of a CSR and C(upload) uploads the certificate.
+ type: str
+ default: generate_csr
+ choices: [generate_csr, upload]
+ distinguished_name:
+ description: Name of the certificate issuer. This option is applicable for C(generate_csr).
+ type: str
+ department_name:
+ description: Name of the department that issued the certificate. This option is applicable for C(generate_csr).
+ type: str
+ business_name:
+ description: Name of the business that issued the certificate. This option is applicable for C(generate_csr).
+ type: str
+ locality:
+ description: Local address of the issuer of the certificate. This option is applicable for C(generate_csr).
+ type: str
+ country_state:
+ description: State in which the issuer resides. This option is applicable for C(generate_csr).
+ type: str
+ country:
+ description: Country in which the issuer resides. This option is applicable for C(generate_csr).
+ type: str
+ email:
+ description: Email associated with the issuer. This option is applicable for C(generate_csr).
+ type: str
+ upload_file:
+ type: str
+ description: Local path of the certificate file to be uploaded. This option is applicable for C(upload).
+ Once the certificate is uploaded, OpenManage Enterprise cannot be accessed for a few seconds.
+requirements:
+ - "python >= 3.8.6"
+author: "Felix Stephen (@felixs88)"
+'''
+
+EXAMPLES = r'''
+---
+- name: Generate a certificate signing request
+ dellemc.openmanage.ome_application_certificate:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "generate_csr"
+ distinguished_name: "hostname.com"
+ department_name: "Remote Access Group"
+ business_name: "Dell Inc."
+ locality: "Round Rock"
+ country_state: "Texas"
+ country: "US"
+ email: "support@dell.com"
+
+- name: Upload the certificate
+ dellemc.openmanage.ome_application_certificate:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "upload"
+ upload_file: "/path/certificate.cer"
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall status of the certificate signing request.
+ returned: always
+ sample: "Successfully generated certificate signing request."
+csr_status:
+ type: dict
+ description: Details of the generated certificate.
+ returned: on success
+ sample:
+ {"CertificateData": "-----BEGIN CERTIFICATE REQUEST-----GHFSUEKLELE
+ af3u4h2rkdkfjasczjfefhkrr/frjrfrjfrxnvzklf/nbcvxmzvndlskmcvbmzkdk
+ kafhaksksvklhfdjtrhhffgeth/tashdrfstkm@kdjFGD/sdlefrujjfvvsfeikdf
+ yeufghdkatbavfdomehtdnske/tahndfavdtdfgeikjlagmdfbandfvfcrfgdtwxc
+ qwgfrteyupojmnsbajdkdbfs/ujdfgthedsygtamnsuhakmanfuarweyuiwruefjr
+ etwuwurefefgfgurkjkdmbvfmvfvfk==-----END CERTIFICATE REQUEST-----"
+ }
+error_info:
+ description: Details of the HTTP error.
+ returned: on HTTP error
+ type: dict
+ sample:
+ {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CSEC9002",
+ "RelatedProperties": [],
+ "Message": "Unable to upload the certificate because the certificate file provided is invalid.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Make sure the CA certificate and private key are correct and retry the operation."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+import os
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+
def get_resource_parameters(module):
    """Resolve the REST method, URI and payload for the requested command.

    For ``generate_csr`` the payload is built from the issuer details supplied
    in the module options; for ``upload`` it is the raw bytes of the
    certificate file.  Fails the module when the file does not exist.
    """
    action_uri = "ApplicationService/Actions/ApplicationService.{0}"
    params = module.params
    if params["command"] == "generate_csr":
        uri = action_uri.format("GenerateCSR")
        payload = {"DistinguishedName": params["distinguished_name"],
                   "DepartmentName": params["department_name"],
                   "BusinessName": params["business_name"],
                   "Locality": params["locality"], "State": params["country_state"],
                   "Country": params["country"], "Email": params["email"]}
    else:
        uri = action_uri.format("UploadCertificate")
        file_path = params["upload_file"]
        if not os.path.exists(file_path):
            module.fail_json(msg="No such file or directory.")
        with open(file_path, 'rb') as cert_file:
            payload = cert_file.read()
    return "POST", uri, payload
+
+
def main():
    """Entry point: generate a CSR or upload a certificate on OpenManage Enterprise."""
    specs = {
        "command": {"type": "str", "required": False,
                    "choices": ["generate_csr", "upload"], "default": "generate_csr"},
        "distinguished_name": {"required": False, "type": "str"},
        "department_name": {"required": False, "type": "str"},
        "business_name": {"required": False, "type": "str"},
        "locality": {"required": False, "type": "str"},
        "country_state": {"required": False, "type": "str"},
        "country": {"required": False, "type": "str"},
        "email": {"required": False, "type": "str"},
        "upload_file": {"required": False, "type": "str"},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[["command", "generate_csr", ["distinguished_name", "department_name",
                                                 "business_name", "locality", "country_state",
                                                 "country", "email"]],
                     ["command", "upload", ["upload_file"]]],
        supports_check_mode=False
    )
    # Certificate uploads are sent as a raw octet stream, not JSON.
    header = {"Content-Type": "application/octet-stream", "Accept": "application/octet-stream"}
    try:
        with RestOME(module.params, req_session=False) as rest_obj:
            method, uri, payload = get_resource_parameters(module)
            command = module.params.get("command")
            # Binary uploads must not be JSON-dumped and need the octet-stream headers.
            dump = command != "upload"
            headers = header if command == "upload" else None
            resp = rest_obj.invoke_request(method, uri, headers=headers, data=payload, dump=dump)
            if resp.success:
                if command == "generate_csr":
                    module.exit_json(msg="Successfully generated certificate signing request.",
                                     csr_status=resp.json_data)
                module.exit_json(msg="Successfully uploaded application certificate.", changed=True)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except Exception as err:
        # Last-resort guard.  The previously enumerated tuple (IOError,
        # ValueError, SSLError, TypeError, ConnectionError, SSLValidationError,
        # OSError) was handled identically, so the duplicate handler is merged.
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py
new file mode 100644
index 000000000..67b00dc8b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py
@@ -0,0 +1,669 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.2.0
+# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_application_console_preferences
+short_description: Configure console preferences on OpenManage Enterprise.
+description: This module allows user to configure the console preferences on OpenManage Enterprise.
+version_added: "5.2.0"
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ report_row_limit:
+ description: The maximum number of rows that you can view on OpenManage Enterprise reports.
+ type: int
+ device_health:
+ description: The time after which the health of the devices must be automatically monitored and updated
+ on the OpenManage Enterprise dashboard.
+ type: dict
+ suboptions:
+ health_check_interval:
+ description: The frequency at which the device health must be recorded and data stored.
+ type: int
+ health_check_interval_unit:
+ description:
+ - The time unit of the frequency at which the device health must be recorded and data stored.
+ - C(Hourly) to set the frequency in hours.
+ - C(Minutes) to set the frequency in minutes.
+ type: str
+ choices: [Hourly, Minutes]
+ health_and_power_state_on_connection_lost:
+ description:
+ - The latest recorded device health.
+ - C(last_known) to display the latest recorded device health when the power connection was lost.
+ - C(unknown) to display the latest recorded device health when the device status moved to unknown.
+ type: str
+ choices: [last_known, unknown]
+ discovery_settings:
+ description: The device naming to be used by the OpenManage Enterprise to identify the discovered iDRACs
+ and other devices.
+ type: dict
+ suboptions:
+ general_device_naming:
+ description:
+ - Applicable to all the discovered devices other than the iDRACs.
+ - C(DNS) to use the DNS name.
+ - C(NETBIOS) to use the NetBIOS name.
+ type: str
+ choices: [DNS, NETBIOS]
+ default: DNS
+ server_device_naming:
+ description:
+ - Applicable to iDRACs only.
+ - C(IDRAC_HOSTNAME) to use the iDRAC hostname.
+ - C(IDRAC_SYSTEM_HOSTNAME) to use the system hostname.
+ type: str
+ choices: [IDRAC_HOSTNAME, IDRAC_SYSTEM_HOSTNAME]
+ default: IDRAC_SYSTEM_HOSTNAME
+ invalid_device_hostname:
+ description: The invalid hostnames separated by a comma.
+ type: str
+ common_mac_addresses:
+ description: The common MAC addresses separated by a comma.
+ type: str
+ server_initiated_discovery:
+ description: Server initiated discovery settings.
+ type: dict
+ suboptions:
+ device_discovery_approval_policy:
+ description:
+ - Discovery approval policies.
+ - "C(Automatic) allows servers with iDRAC Firmware version 4.00.00.00, which are on the same network as the
+ console, to be discovered automatically by the console."
+ - C(Manual) for the servers to be discovered by the user manually.
+ type: str
+ choices: [Automatic, Manual]
+ set_trap_destination:
+ description: Trap destination settings.
+ type: bool
+ mx7000_onboarding_preferences:
+ description:
+ - Alert-forwarding behavior on chassis when they are onboarded.
+ - C(all) to receive all alert.
+ - C(chassis) to receive chassis category alerts only.
+ type: str
+ choices: [all, chassis]
+ builtin_appliance_share:
+ description: The external network share that the appliance must access to complete operations.
+ type: dict
+ suboptions:
+ share_options:
+ description:
+ - The share options.
+ - C(CIFS) to select CIFS share type.
+ - C(HTTPS) to select HTTPS share type.
+ type: str
+ choices: [CIFS, HTTPS]
+ cifs_options:
+ description:
+ - The SMB protocol version.
+ - I(cifs_options) is required when I(share_options) is C(CIFS).
+ - C(V1) to enable SMBv1.
+ - C(V2) to enable SMBv2
+ type: str
+ choices: [V1, V2]
+ email_sender_settings:
+ description: The email address of the user who is sending an email message.
+ type: str
+ trap_forwarding_format:
+ description:
+ - The trap forwarding format.
+ - C(Original) to retain the trap data as is.
+ - C(Normalized) to normalize the trap data.
+ type: str
+ choices: [Original, Normalized]
+ metrics_collection_settings:
+ description: The frequency of the PowerManager extension data maintenance and purging.
+ type: int
+requirements:
+ - "python >= 3.8.6"
+notes:
+ - This module supports C(check_mode).
+author:
+ - Sachin Apagundi(@sachin-apa)
+ - Husniya Hameed (@husniya-hameed)
+'''
+
+EXAMPLES = r'''
+---
+- name: Update Console preferences with all the settings.
+ dellemc.openmanage.ome_application_console_preferences:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ report_row_limit: 123
+ device_health:
+ health_check_interval: 1
+ health_check_interval_unit: Hourly
+ health_and_power_state_on_connection_lost: last_known
+ discovery_settings:
+ general_device_naming: DNS
+ server_device_naming: IDRAC_HOSTNAME
+ invalid_device_hostname: "localhost"
+ common_mac_addresses: "::"
+ server_initiated_discovery:
+ device_discovery_approval_policy: Automatic
+ set_trap_destination: True
+ mx7000_onboarding_preferences: all
+ builtin_appliance_share:
+ share_options: CIFS
+ cifs_options: V1
+ email_sender_settings: "admin@dell.com"
+ trap_forwarding_format: Normalized
+ metrics_collection_settings: 31
+
+- name: Update Console preferences with report and device health settings.
+ dellemc.openmanage.ome_application_console_preferences:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ report_row_limit: 236
+ device_health:
+ health_check_interval: 10
+ health_check_interval_unit: Hourly
+ health_and_power_state_on_connection_lost: last_known
+
+- name: Update Console preferences with invalid device health settings.
+ dellemc.openmanage.ome_application_console_preferences:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_health:
+ health_check_interval: 65
+ health_check_interval_unit: Minutes
+
+- name: Update Console preferences with discovery and built in appliance share settings.
+ dellemc.openmanage.ome_application_console_preferences:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ discovery_settings:
+ general_device_naming: DNS
+ server_device_naming: IDRAC_SYSTEM_HOSTNAME
+ invalid_device_hostname: "localhost"
+ common_mac_addresses: "00:53:45:00:00:00"
+ builtin_appliance_share:
+ share_options: CIFS
+ cifs_options: V1
+
+- name: Update Console preferences with server initiated discovery, mx7000 onboarding preferences, email sender,
+ trap forwarding format, and metrics collection settings.
+ dellemc.openmanage.ome_application_console_preferences:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ server_initiated_discovery:
+ device_discovery_approval_policy: Automatic
+ set_trap_destination: True
+ mx7000_onboarding_preferences: chassis
+ email_sender_settings: "admin@dell.com"
+ trap_forwarding_format: Original
+ metrics_collection_settings: 365
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall status of the console preferences.
+ returned: always
+ sample: "Successfully update the console preferences."
+console_preferences:
+ type: list
+ description: Details of the console preferences.
+ returned: on success
+ sample:
+ [
+ {
+ "Name": "DEVICE_PREFERRED_NAME",
+ "DefaultValue": "SLOT_NAME",
+ "Value": "PREFER_DNS,PREFER_IDRAC_SYSTEM_HOSTNAME",
+ "DataType": "java.lang.String",
+ "GroupName": "DISCOVERY_SETTING"
+ },
+ {
+ "Name": "INVALID_DEVICE_HOSTNAME",
+ "DefaultValue": "",
+ "Value": "localhost,localhost.localdomain,not defined,pv132t,pv136t,default,dell,idrac-",
+ "DataType": "java.lang.String",
+ "GroupName": "DISCOVERY_SETTING"
+ },
+ {
+ "Name": "COMMON_MAC_ADDRESSES",
+ "DefaultValue": "",
+ "Value": "00:53:45:00:00:00,33:50:6F:45:30:30,50:50:54:50:30:30,00:00:FF:FF:FF:FF,20:41:53:59:4E:FF,00:00:00:00:00:00,20:41:53:59:4e:ff,00:00:00:00:00:00",
+ "DataType": "java.lang.String",
+ "GroupName": "DISCOVERY_SETTING"
+ },
+ {
+ "Name": "SHARE_TYPE",
+ "DefaultValue": "CIFS",
+ "Value": "CIFS",
+ "DataType": "java.lang.String",
+ "GroupName": "BUILT_IN_APPLIANCE_SHARE_SETTINGS"
+ },
+ {
+ "Name": "TRAP_FORWARDING_SETTING",
+ "DefaultValue": "AsIs",
+ "Value": "Normalized",
+ "DataType": "java.lang.String",
+ "GroupName": ""
+ },
+ {
+ "Name": "DATA_PURGE_INTERVAL",
+ "DefaultValue": "365",
+ "Value": "3650000",
+ "DataType": "java.lang.Integer",
+ "GroupName": ""
+ },
+ {
+ "Name": "CONSOLE_CONNECTION_SETTING",
+ "DefaultValue": "last_known",
+ "Value": "last_known",
+ "DataType": "java.lang.String",
+ "GroupName": "CONSOLE_CONNECTION_SETTING"
+ },
+ {
+ "Name": "MIN_PROTOCOL_VERSION",
+ "DefaultValue": "V2",
+ "Value": "V1",
+ "DataType": "java.lang.String",
+ "GroupName": "CIFS_PROTOCOL_SETTINGS"
+ },
+ {
+ "Name": "ALERT_ACKNOWLEDGEMENT_VIEW",
+ "DefaultValue": "2000",
+ "Value": "2000",
+ "DataType": "java.lang.Integer",
+ "GroupName": ""
+ },
+ {
+ "Name": "AUTO_CONSOLE_UPDATE_AFTER_DOWNLOAD",
+ "DefaultValue": "false",
+ "Value": "false",
+ "DataType": "java.lang.Boolean",
+ "GroupName": "CONSOLE_UPDATE_SETTING_GROUP"
+ },
+ {
+ "Name": "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION",
+ "DefaultValue": "false",
+ "Value": "false",
+ "DataType": "java.lang.Boolean",
+ "GroupName": ""
+ },
+ {
+ "Name": "REPORTS_MAX_RESULTS_LIMIT",
+ "DefaultValue": "0",
+ "Value": "2000000000000000000000000",
+ "DataType": "java.lang.Integer",
+ "GroupName": ""
+ },
+ {
+ "Name": "EMAIL_SENDER",
+ "DefaultValue": "omcadmin@dell.com",
+ "Value": "admin1@dell.com@dell.com@dell.com",
+ "DataType": "java.lang.String",
+ "GroupName": ""
+ },
+ {
+ "Name": "MX7000_ONBOARDING_PREF",
+ "DefaultValue": "all",
+ "Value": "test_chassis",
+ "DataType": "java.lang.String",
+ "GroupName": ""
+ },
+ {
+ "Name": "DISCOVERY_APPROVAL_POLICY",
+ "DefaultValue": "Automatic",
+ "Value": "Automatic_test",
+ "DataType": "java.lang.String",
+ "GroupName": ""
+ }
+ ]
+error_info:
+ description: Details of the HTTP error.
+ returned: on HTTP error
+ type: dict
+ sample:
+ {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1006",
+ "RelatedProperties": [],
+ "Message": "Unable to complete the request because the resource URI does not exist or is not implemented.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Enter a valid URI and retry the operation."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict
+
+SUCCESS_MSG = "Successfully updated the Console Preferences settings."
+SETTINGS_URL = "ApplicationService/Settings"
+NO_CHANGES = "No changes found to be applied."
+CHANGES_FOUND = "Changes found to be applied."
+HEALTH_CHECK_UNIT_REQUIRED = "The health check unit is required when health check interval is specified."
+HEALTH_CHECK_INTERVAL_REQUIRED = "The health check interval is required when health check unit is specified."
+HEALTH_CHECK_INTERVAL_INVALID = "The health check interval specified is invalid for the {0}"
+JOB_URL = "JobService/Jobs"
+CIFS_URL = "ApplicationService/Actions/ApplicationService.UpdateShareTypeSettings"
+CONSOLE_SETTINGS_VALUES = ["DATA_PURGE_INTERVAL", "EMAIL_SENDER", "TRAP_FORWARDING_SETTING",
+ "MX7000_ONBOARDING_PREF", "REPORTS_MAX_RESULTS_LIMIT",
+ "DISCOVERY_APPROVAL_POLICY", "NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION",
+ "DEVICE_PREFERRED_NAME", "INVALID_DEVICE_HOSTNAME", "COMMON_MAC_ADDRESSES",
+ "CONSOLE_CONNECTION_SETTING", "MIN_PROTOCOL_VERSION", "SHARE_TYPE"]
+
+
def job_details(rest_obj):
    """Return the most recently created health task job (JobType Id 6)."""
    response = rest_obj.invoke_request("GET", JOB_URL, query_param={"$filter": "JobType/Id eq 6"})
    health_jobs = response.json_data.get('value')
    # The job with the highest Id is the latest one created.
    newest_id = sorted(job["Id"] for job in health_jobs)[-1]
    for job in health_jobs:
        if job["Id"] == newest_id:
            return job
+
+
def create_job(module):
    """Build the Health_Task job payload and its cron schedule string.

    Returns (job_payload, schedule); both are None when the user supplied no
    device_health options.
    """
    job_payload = None
    schedule = None
    health = module.params.get("device_health")
    if health:
        interval = health.get("health_check_interval")
        unit = health.get("health_check_interval_unit")
        # Quartz-style cron expressions as expected by the OME job service.
        if unit == "Hourly":
            schedule = "0 0 0/" + str(interval) + " 1/1 * ? *"
        elif unit == "Minutes":
            schedule = "0 0/" + str(interval) + " * 1/1 * ? *"
        job_payload = {
            "Id": 0,
            "JobName": "Global Health Task",
            "JobDescription": "Global Health Task",
            "Schedule": schedule,
            "State": "Enabled",
            "JobType": {"Id": 6, "Name": "Health_Task"},
            "Params": [{"Key": "metricType", "Value": "40, 50"}],
            "Targets": [{"Id": 500, "Data": "", "TargetType": {"Id": 6000, "Name": "GROUP"}}],
        }
    return job_payload, schedule
+
+
def fetch_cp_settings(rest_obj):
    """Retrieve the current console preference settings from the appliance."""
    response = rest_obj.invoke_request("GET", SETTINGS_URL)
    return response.json_data.get('value')
+
+
def create_payload_dict(curr_payload):
    """Index the console settings list by each entry's ``Name`` attribute."""
    return {setting["Name"]: setting for setting in curr_payload}
+
+
def create_payload(module, curr_payload):
    """Translate the module parameters into a ConsoleSetting update payload.

    Returns (payload, payload_dict): *payload* holds only the settings the
    user asked to change, each cloned from the current server response with
    its ``Value`` replaced; *payload_dict* maps every current setting name to
    its full entry.
    """
    payload_dict = {setting["Name"]: setting for setting in curr_payload}
    staged = []

    def _stage(name, value):
        # Clone the current entry so the server response is never mutated.
        entry = payload_dict[name].copy()
        entry["Value"] = value
        staged.append(entry)

    params = module.params
    metrics = params.get("metrics_collection_settings")
    if metrics:
        _stage("DATA_PURGE_INTERVAL", metrics)
    email = params.get("email_sender_settings")
    if email:
        _stage("EMAIL_SENDER", email)
    trap_format = params.get("trap_forwarding_format")
    if trap_format:
        # The appliance stores the "Original" choice as "AsIs".
        _stage("TRAP_FORWARDING_SETTING", {"Original": "AsIs", "Normalized": "Normalized"}.get(trap_format))
    mx7000 = params.get("mx7000_onboarding_preferences")
    if mx7000:
        _stage("MX7000_ONBOARDING_PREF", mx7000)
    row_limit = params.get("report_row_limit")
    if row_limit:
        _stage("REPORTS_MAX_RESULTS_LIMIT", row_limit)
    initiated = params.get("server_initiated_discovery")
    if initiated:
        if initiated.get("device_discovery_approval_policy"):
            _stage("DISCOVERY_APPROVAL_POLICY", initiated.get("device_discovery_approval_policy"))
        # A boolean option: False is a valid value, so test against None.
        if initiated.get("set_trap_destination") is not None:
            _stage("NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION", initiated.get("set_trap_destination"))
    discovery = params.get("discovery_settings")
    if discovery:
        general = discovery.get("general_device_naming")
        server = discovery.get("server_device_naming")
        if general and server:
            _stage("DEVICE_PREFERRED_NAME", "PREFER_" + general + "," + "PREFER_" + server)
        elif general:
            _stage("DEVICE_PREFERRED_NAME", "PREFER_" + general)
        elif server:
            _stage("DEVICE_PREFERRED_NAME", "PREFER_" + server)
        if discovery.get("invalid_device_hostname"):
            _stage("INVALID_DEVICE_HOSTNAME", discovery.get("invalid_device_hostname"))
        if discovery.get("common_mac_addresses"):
            _stage("COMMON_MAC_ADDRESSES", discovery.get("common_mac_addresses"))
    health = params.get("device_health")
    if health and health.get("health_and_power_state_on_connection_lost"):
        _stage("CONSOLE_CONNECTION_SETTING", health.get("health_and_power_state_on_connection_lost"))
    share = params.get("builtin_appliance_share")
    # MIN_PROTOCOL_VERSION is meaningful only for CIFS shares.
    if share and share.get("share_options") == "CIFS":
        _stage("MIN_PROTOCOL_VERSION", share.get("cifs_options"))
    return {"ConsoleSetting": staged}, payload_dict
+
+
def create_cifs_payload(module, curr_payload):
    """Build the SHARE_TYPE payload consumed by the UpdateShareTypeSettings action."""
    payload_dict = {setting["Name"]: setting for setting in curr_payload}
    staged = []
    share = module.params.get("builtin_appliance_share")
    if share and share.get("share_options"):
        entry = payload_dict["SHARE_TYPE"].copy()
        entry["Value"] = share.get("share_options")
        staged.append(entry)
    return {"ConsoleSetting": staged}
+
+
def update_console_preferences(module, rest_obj, payload, payload_cifs, job_payload, job, payload_dict, schedule):
    """Apply the requested console preference changes.

    Issues up to three POSTs: the health-task job (only when the schedule
    changed), the share-type action (only when the share type changed), and
    the generic settings update. Returns (settings_resp, cifs_resp, job_resp);
    the last two are None when their request was not needed.
    """
    cifs_resp = None
    job_final_resp = None
    get_bas = module.params.get("builtin_appliance_share")
    device_health = module.params.get("device_health")
    # SHARE_TYPE is applied through the dedicated CIFS action below, so drop
    # it from the generic settings payload. BUGFIX: the original used
    # ``[lst.remove(i) for i in lst if ...]`` which mutates the list while
    # iterating it (can skip entries) and abuses a comprehension for side
    # effects; rebuilding the list is safe and equivalent.
    payload["ConsoleSetting"] = [setting for setting in payload["ConsoleSetting"]
                                 if setting["Name"] != "SHARE_TYPE"]
    if device_health and device_health.get("health_check_interval_unit") and job["Schedule"] != schedule:
        job_final_resp = rest_obj.invoke_request("POST", JOB_URL, data=job_payload)
    if get_bas and get_bas.get("share_options") and \
            payload_dict["SHARE_TYPE"]["Value"] != get_bas.get("share_options"):
        cifs_resp = rest_obj.invoke_request("POST", CIFS_URL, data=payload_cifs)
    final_resp = rest_obj.invoke_request("POST", SETTINGS_URL, data=payload)
    return final_resp, cifs_resp, job_final_resp
+
+
+def _diff_payload(curr_resp, update_resp, payload_cifs, schedule, job_det):
+ diff = 0
+ update_resp["ConsoleSetting"].extend(payload_cifs["ConsoleSetting"])
+ if schedule and job_det["Schedule"] != schedule:
+ diff += 1
+ for i in curr_resp:
+ for j in update_resp["ConsoleSetting"]:
+ if i["Name"] == j["Name"]:
+ if isinstance(j["Value"], bool):
+ j["Value"] = str(j["Value"]).lower()
+ if isinstance(j["Value"], int):
+ j["Value"] = str(j["Value"])
+ if i["Value"] != j["Value"]:
+ diff += 1
+ return diff
+
+
def process_check_mode(module, diff):
    """Exit early when nothing would change, or report pending changes in check mode."""
    if not diff:
        module.exit_json(msg=NO_CHANGES)
    elif module.check_mode:
        # diff is known truthy here, so the original's "diff and" test is redundant.
        module.exit_json(msg=CHANGES_FOUND, changed=True)
+
+
def _validate_params(module):
    """Fail the module run when the device_health options are inconsistent."""
    message = _validate_health_check_interval(module)
    if message:
        module.fail_json(msg=message)
+
+
def _validate_health_check_interval(module):
    """Return an error message when the health check interval/unit pair is
    invalid, or None when the options are consistent."""
    health = module.params.get("device_health")
    if not health:
        return None
    interval = health.get("health_check_interval")
    unit = health.get("health_check_interval_unit")
    # Interval and unit must be supplied together.
    if interval and not unit:
        return HEALTH_CHECK_UNIT_REQUIRED
    if unit and not interval:
        return HEALTH_CHECK_INTERVAL_REQUIRED
    if interval and unit:
        # Hourly schedules accept 1-23, minute schedules 1-59.
        limit = {"Hourly": 23, "Minutes": 59}.get(unit)
        if limit and not 1 <= interval <= limit:
            return HEALTH_CHECK_INTERVAL_INVALID.format(unit)
    return None
+
+
def main():
    """Module entry point: build the argument spec, validate the parameters,
    and apply the requested console preference changes."""
    device_health_opt = {"health_check_interval": {"type": "int", "required": False},
                         "health_check_interval_unit": {"type": "str", "required": False,
                                                        "choices": ["Hourly", "Minutes"]},
                         "health_and_power_state_on_connection_lost": {"type": "str", "required": False,
                                                                       "choices": ["last_known", "unknown"]}
                         }
    discovery_settings_opt = {
        "general_device_naming": {"type": "str", "required": False, "default": "DNS",
                                  "choices": ["DNS", "NETBIOS"]},
        "server_device_naming": {"type": "str", "required": False, "default": "IDRAC_SYSTEM_HOSTNAME",
                                 "choices": ["IDRAC_HOSTNAME", "IDRAC_SYSTEM_HOSTNAME"]},
        "invalid_device_hostname": {"type": "str", "required": False},
        "common_mac_addresses": {"type": "str", "required": False}
    }
    server_initiated_discovery_opt = {
        "device_discovery_approval_policy": {"type": "str", "required": False, "choices": ["Automatic", "Manual"]},
        "set_trap_destination": {"type": "bool", "required": False},
    }
    builtin_appliance_share_opt = {
        "share_options": {"type": "str", "required": False, "choices": ["CIFS", "HTTPS"]},
        "cifs_options": {"type": "str", "required": False, "choices": ["V1", "V2"]},
    }
    specs = {
        "report_row_limit": {"required": False, "type": "int"},
        "device_health": {"required": False, "type": "dict", "options": device_health_opt},
        "discovery_settings": {"required": False, "type": "dict", "options": discovery_settings_opt},
        "server_initiated_discovery": {"required": False, "type": "dict",
                                       "options": server_initiated_discovery_opt},
        "mx7000_onboarding_preferences": {"required": False, "type": "str", "choices": ["all", "chassis"]},
        "builtin_appliance_share": {"required": False, "type": "dict",
                                    "options": builtin_appliance_share_opt,
                                    # cifs_options is only meaningful for CIFS shares.
                                    "required_if": [['share_options', "CIFS", ('cifs_options',)]]},
        "email_sender_settings": {"required": False, "type": "str"},
        "trap_forwarding_format": {"required": False, "type": "str", "choices": ["Normalized", "Original"]},
        "metrics_collection_settings": {"required": False, "type": "int"},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(argument_spec=specs,
                           required_one_of=[["report_row_limit", "device_health", "discovery_settings",
                                             "server_initiated_discovery", "mx7000_onboarding_preferences",
                                             "builtin_appliance_share", "email_sender_settings",
                                             "trap_forwarding_format", "metrics_collection_settings"]],
                           supports_check_mode=True, )

    try:
        _validate_params(module)
        with RestOME(module.params, req_session=True) as rest_obj:
            job = job_details(rest_obj)
            job_payload, schedule = create_job(module)
            curr_resp = fetch_cp_settings(rest_obj)
            payload, payload_dict = create_payload(module, curr_resp)
            cifs_payload = create_cifs_payload(module, curr_resp)
            diff = _diff_payload(curr_resp, payload, cifs_payload, schedule, job)
            process_check_mode(module, diff)
            resp, cifs_resp, job_resp = update_console_preferences(module, rest_obj, payload, cifs_payload,
                                                                   job_payload, job, payload_dict, schedule)
            resp_req = fetch_cp_settings(rest_obj)
            # Echo back only the settings this module manages, stripped of
            # OData metadata keys.
            resp_data = list(filter(lambda d: d['Name'] in CONSOLE_SETTINGS_VALUES, resp_req))
            cp_list = [strip_substr_dict(cp) for cp in resp_data]
            module.exit_json(msg=SUCCESS_MSG, console_preferences=cp_list)
    except HTTPError as err:
        # HTTPError is file-like, so its JSON error body can be read directly.
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
        # BUGFIX: these exceptions are not HTTP responses and have no readable
        # body; the original called json.load(err) here, which raised a
        # secondary AttributeError and masked the real failure.
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py
new file mode 100644
index 000000000..03eef19ed
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py
@@ -0,0 +1,751 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.1.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_application_network_address
+short_description: Updates the network configuration on OpenManage Enterprise
+version_added: "2.1.0"
+description:
+ - This module allows the configuration of a DNS and an IPV4 or IPV6 network on OpenManage Enterprise.
+notes:
+ - The configuration changes can only be applied to one interface at a time.
+ - The system management consoles might be unreachable for some time after the configuration changes are applied.
+ - This module supports C(check_mode).
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ enable_nic:
+ description: Enable or disable Network Interface Card (NIC) configuration.
+ type: bool
+ default: true
+ interface_name:
+ description:
+ - "If there are multiple interfaces, network configuration changes can be applied to a single interface using the
+ interface name of the NIC."
+ - If this option is not specified, Primary interface is chosen by default.
+ type: str
+ ipv4_configuration:
+ description:
+ - IPv4 network configuration.
+ - "I(Warning) Ensure that you have an alternate interface to access OpenManage Enterprise as these options can
+ change the current IPv4 address for I(hostname)."
+ type: dict
+ suboptions:
+ enable:
+ description:
+ - Enable or disable access to the network using IPv4.
+ type: bool
+ required: true
+ enable_dhcp:
+ description:
+ - "Enable or disable the automatic request to get an IPv4 address from the IPv4 Dynamic Host Configuration
+ Protocol (DHCP) server"
+ - "If I(enable_dhcp) option is true, OpenManage Enterprise retrieves the IP configuration—IPv4 address,
+ subnet mask, and gateway from a DHCP server on the existing network."
+ type: bool
+ static_ip_address:
+ description:
+ - Static IPv4 address
+ - This option is applicable when I(enable_dhcp) is false.
+ type: str
+ static_subnet_mask:
+ description:
+ - Static IPv4 subnet mask address
+ - This option is applicable when I(enable_dhcp) is false.
+ type: str
+ static_gateway:
+ description:
+ - Static IPv4 gateway address
+ - This option is applicable when I(enable_dhcp) is false.
+ type: str
+ use_dhcp_for_dns_server_names:
+ description:
+ - This option allows to automatically request and obtain a DNS server IPv4 address from the DHCP server.
+ - This option is applicable when I(enable_dhcp) is true.
+ type: bool
+ static_preferred_dns_server:
+ description:
+ - Static IPv4 DNS preferred server
+ - This option is applicable when I(use_dhcp_for_dns_server_names) is false.
+ type: str
+ static_alternate_dns_server:
+ description:
+ - Static IPv4 DNS alternate server
+ - This option is applicable when I(use_dhcp_for_dns_server_names) is false.
+ type: str
+ ipv6_configuration:
+ description:
+ - IPv6 network configuration.
+ - "I(Warning) Ensure that you have an alternate interface to access OpenManage Enterprise as these options can
+ change the current IPv6 address for I(hostname)."
+ type: dict
+ suboptions:
+ enable:
+ description: Enable or disable access to the network using the IPv6.
+ type: bool
+ required: true
+ enable_auto_configuration:
+ description:
+ - "Enable or disable the automatic request to get an IPv6 address from the IPv6 DHCP server or router
+ advertisements(RA)"
+ - "If I(enable_auto_configuration) is true, OME retrieves IP configuration-IPv6 address, prefix, and gateway,
+ from a DHCPv6 server on the existing network"
+ type: bool
+ static_ip_address:
+ description:
+ - Static IPv6 address
+ - This option is applicable when I(enable_auto_configuration) is false.
+ type: str
+ static_prefix_length:
+ description:
+ - Static IPv6 prefix length
+ - This option is applicable when I(enable_auto_configuration) is false.
+ type: int
+ static_gateway:
+ description:
+ - Static IPv6 gateway address
+ - This option is applicable when I(enable_auto_configuration) is false.
+ type: str
+ use_dhcp_for_dns_server_names:
+ description:
+ - This option allows to automatically request and obtain a DNS server IPv6 address from the DHCP server.
+ - This option is applicable when I(enable_auto_configuration) is true
+ type: bool
+ static_preferred_dns_server:
+ description:
+ - Static IPv6 DNS preferred server
+ - This option is applicable when I(use_dhcp_for_dns_server_names) is false.
+ type: str
+ static_alternate_dns_server:
+ description:
+ - Static IPv6 DNS alternate server
+ - This option is applicable when I(use_dhcp_for_dns_server_names) is false.
+ type: str
+ management_vlan:
+ description:
+ - vLAN configuration.
+ - These settings are applicable for OpenManage Enterprise Modular.
+ type: dict
+ suboptions:
+ enable_vlan:
+ description:
+ - Enable or disable vLAN for management.
+ - The vLAN configuration cannot be updated if the I(register_with_dns) field under I(dns_configuration) is true.
+ - "I(WARNING) Ensure that the network cable is plugged to the correct port after the vLAN configuration
+ changes have been made. If not, the configuration change may not be effective."
+ required: true
+ type: bool
+ vlan_id:
+ description:
+ - vLAN ID.
+ - This option is applicable when I(enable_vlan) is true.
+ type: int
+ dns_configuration:
+ description: Domain Name System(DNS) settings.
+ type: dict
+ suboptions:
+ register_with_dns:
+ description:
+ - Register/Unregister I(dns_name) on the DNS Server.
+ - This option cannot be updated if vLAN configuration changes.
+ type: bool
+ use_dhcp_for_dns_domain_name:
+ description: Get the I(dns_domain_name) using a DHCP server.
+ type: bool
+ dns_name:
+ description:
+ - DNS name for I(hostname)
+ - This is applicable when I(register_with_dns) is true.
+ type: str
+ dns_domain_name:
+ description:
+ - Static DNS domain name
+ - This is applicable when I(use_dhcp_for_dns_domain_name) is false.
+ type: str
+ reboot_delay:
+ description:
+ - The time in seconds, after which settings are applied.
+ - This option is not mandatory.
+ type: int
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Jagadeesh N V(@jagadeeshnv)"
+'''
+
+EXAMPLES = r'''
+---
+- name: IPv4 network configuration for primary interface
+ dellemc.openmanage.ome_application_network_address:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_nic: true
+ ipv4_configuration:
+ enable: true
+ enable_dhcp: false
+ static_ip_address: 192.168.0.2
+ static_subnet_mask: 255.255.254.0
+ static_gateway: 192.168.0.3
+ use_dhcp_for_dns_server_names: false
+ static_preferred_dns_server: 192.168.0.4
+ static_alternate_dns_server: 192.168.0.5
+ reboot_delay: 5
+
+- name: IPv6 network configuration for primary interface
+ dellemc.openmanage.ome_application_network_address:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ ipv6_configuration:
+ enable: true
+ enable_auto_configuration: true
+ static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
+ static_prefix_length: 10
+ static_gateway: 2626:f2f2:f081:9:1c1c:f1f1:4747:2
+ use_dhcp_for_dns_server_names: true
+ static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3
+ static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4
+
+- name: Management vLAN configuration for primary interface
+ dellemc.openmanage.ome_application_network_address:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ management_vlan:
+ enable_vlan: true
+ vlan_id: 3344
+ dns_configuration:
+ register_with_dns: false
+ reboot_delay: 1
+
+- name: DNS settings
+ dellemc.openmanage.ome_application_network_address:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ ipv4_configuration:
+ enable: true
+ use_dhcp_for_dns_server_names: false
+ static_preferred_dns_server: 192.168.0.4
+ static_alternate_dns_server: 192.168.0.5
+ dns_configuration:
+ register_with_dns: true
+ use_dhcp_for_dns_domain_name: false
+ dns_name: "MX-SVCTAG"
+ dns_domain_name: "dnslocaldomain"
+
+- name: Disable NIC interface eth1
+ dellemc.openmanage.ome_application_network_address:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_nic: false
+ interface_name: eth1
+
+- name: Complete network settings for interface eth1
+ dellemc.openmanage.ome_application_network_address:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_nic: true
+ interface_name: eth1
+ ipv4_configuration:
+ enable: true
+ enable_dhcp: false
+ static_ip_address: 192.168.0.2
+ static_subnet_mask: 255.255.254.0
+ static_gateway: 192.168.0.3
+ use_dhcp_for_dns_server_names: false
+ static_preferred_dns_server: 192.168.0.4
+ static_alternate_dns_server: 192.168.0.5
+ ipv6_configuration:
+ enable: true
+ enable_auto_configuration: true
+ static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
+ static_prefix_length: 10
+ static_gateway: ffff::2607:f2b1:f081:9
+ use_dhcp_for_dns_server_names: true
+ static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3
+ static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4
+ dns_configuration:
+ register_with_dns: true
+ use_dhcp_for_dns_domain_name: false
+ dns_name: "MX-SVCTAG"
+ dns_domain_name: "dnslocaldomain"
+ reboot_delay: 5
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall status of the network address configuration change.
+ returned: always
+ sample: Successfully updated network address configuration
+network_configuration:
+ type: dict
+ description: Updated application network address configuration.
+ returned: on success
+ sample: {
+ "Delay": 0,
+ "DnsConfiguration": {
+ "DnsDomainName": "",
+ "DnsName": "MX-SVCTAG",
+ "RegisterWithDNS": false,
+ "UseDHCPForDNSDomainName": true
+ },
+ "EnableNIC": true,
+ "InterfaceName": "eth0",
+ "PrimaryInterface": true,
+ "Ipv4Configuration": {
+ "Enable": true,
+ "EnableDHCP": false,
+ "StaticAlternateDNSServer": "",
+ "StaticGateway": "192.168.0.2",
+ "StaticIPAddress": "192.168.0.3",
+ "StaticPreferredDNSServer": "192.168.0.4",
+ "StaticSubnetMask": "255.255.254.0",
+ "UseDHCPForDNSServerNames": false
+ },
+ "Ipv6Configuration": {
+ "Enable": true,
+ "EnableAutoConfiguration": true,
+ "StaticAlternateDNSServer": "",
+ "StaticGateway": "",
+ "StaticIPAddress": "",
+ "StaticPreferredDNSServer": "",
+ "StaticPrefixLength": 0,
+ "UseDHCPForDNSServerNames": true
+ },
+ "ManagementVLAN": {
+ "EnableVLAN": false,
+ "Id": 1
+ }
+ }
+job_info:
+ description: Details of the job to update in case OME version is >= 3.3.
+ returned: on success
+ type: dict
+ sample: {
+ "Builtin": false,
+ "CreatedBy": "system",
+ "Editable": true,
+ "EndTime": null,
+ "Id": 14902,
+ "JobDescription": "Generic OME runtime task",
+ "JobName": "OMERealtime_Task",
+ "JobStatus": {
+ "Id": 2080,
+ "Name": "New"
+ },
+ "JobType": {
+ "Id": 207,
+ "Internal": true,
+ "Name": "OMERealtime_Task"
+ },
+ "LastRun": null,
+ "LastRunStatus": {
+ "Id": 2080,
+ "Name": "New"
+ },
+ "NextRun": null,
+ "Params": [
+ {
+ "JobId": 14902,
+ "Key": "Nmcli_Update",
+ "Value": "{\"interfaceName\":\"eth0\",\"profileName\":\"eth0\",\"enableNIC\":true,
+ \"ipv4Configuration\":{\"enable\":true,\"enableDHCP\":true,\"staticIPAddress\":\"\",
+ \"staticSubnetMask\":\"\",\"staticGateway\":\"\",\"useDHCPForDNSServerNames\":true,
+ \"staticPreferredDNSServer\":\"\",\"staticAlternateDNSServer\":\"\"},
+ \"ipv6Configuration\":{\"enable\":false,\"enableAutoConfiguration\":true,\"staticIPAddress\":\"\",
+ \"staticPrefixLength\":0,\"staticGateway\":\"\",\"useDHCPForDNSServerNames\":false,
+ \"staticPreferredDNSServer\":\"\",\"staticAlternateDNSServer\":\"\"},
+ \"managementVLAN\":{\"enableVLAN\":false,\"id\":0},\"dnsConfiguration\":{\"registerWithDNS\":false,
+ \"dnsName\":\"\",\"useDHCPForDNSDomainName\":false,\"dnsDomainName\":\"\",\"fqdndomainName\":\"\",
+ \"ipv4CurrentPreferredDNSServer\":\"\",\"ipv4CurrentAlternateDNSServer\":\"\",
+ \"ipv6CurrentPreferredDNSServer\":\"\",\"ipv6CurrentAlternateDNSServer\":\"\"},
+ \"currentSettings\":{\"ipv4Address\":[],\"ipv4Gateway\":\"\",\"ipv4Dns\":[],\"ipv4Domain\":\"\",
+ \"ipv6Address\":[],\"ipv6LinkLocalAddress\":\"\",\"ipv6Gateway\":\"\",\"ipv6Dns\":[],
+ \"ipv6Domain\":\"\"},\"delay\":0,\"primaryInterface\":true,\"modifiedConfigs\":{}}"
+ }
+ ],
+ "Schedule": "startnow",
+ "StartTime": null,
+ "State": "Enabled",
+ "Targets": [],
+ "UpdatedBy": null,
+ "Visible": true
+ }
+error_info:
+ description: Details of the HTTP error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to update the address configuration because a dependent field is missing for Use DHCP
+ for DNS Domain Name, Enable DHCP for ipv4 or Enable Autoconfig for ipv6 settings for valid configuration .",
+ "MessageArgs": [
+ "Use DHCP for DNS Domain Name, Enable DHCP for ipv4 or Enable Autoconfig for ipv6 settings for valid
+ configuration"
+ ],
+ "MessageId": "CAPP1304",
+ "RelatedProperties": [],
+ "Resolution": "Make sure that all dependent fields contain valid content and retry the operation.",
+ "Severity": "Critical"
+ }
+ ],
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information."
+ }
+ }
+'''
+
+import json
+import socket
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+
+IP_CONFIG = "ApplicationService/Network/AddressConfiguration"
+JOB_IP_CONFIG = "ApplicationService/Network/AdapterConfigurations"
+POST_IP_CONFIG = "ApplicationService/Actions/Network.ConfigureNetworkAdapter"
+CHANGES_FOUND = "Changes found to be applied."
+NO_CHANGES_FOUND = "No changes found to be applied."
+
+
def validate_ip_address(address):
    """Return True when *address* is a valid dotted-quad IPv4 address."""
    # inet_aton also accepts short forms such as "1.2.3", so additionally
    # require the full four-octet spelling.
    if address.count('.') != 3:
        return False
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
+
+
def validate_ip_v6_address(address):
    """Return True when *address* parses as a valid IPv6 address."""
    try:
        socket.inet_pton(socket.AF_INET6, address)
        return True
    except socket.error:
        return False
+
+
def remove_unwanted_keys(key_list, payload):
    """Drop the listed keys from *payload* in place; missing keys are ignored."""
    for key in key_list:
        payload.pop(key, None)
+
+
def format_payload(src_dict):
    """Convert module option names to their API attribute names.

    Entries whose value is None (not supplied by the user) are dropped.
    Returns None when *src_dict* itself is None or empty.
    """
    option_to_attribute = {"enable_nic": "EnableNIC",
                           "interface_name": "InterfaceName",
                           "enable": "Enable",
                           "enable_dhcp": "EnableDHCP",
                           "static_ip_address": "StaticIPAddress",
                           "static_subnet_mask": "StaticSubnetMask",
                           "static_gateway": "StaticGateway",
                           "use_dhcp_for_dns_server_names": "UseDHCPForDNSServerNames",
                           "static_preferred_dns_server": "StaticPreferredDNSServer",
                           "static_alternate_dns_server": "StaticAlternateDNSServer",
                           "enable_auto_configuration": "EnableAutoConfiguration",
                           "static_prefix_length": "StaticPrefixLength",
                           "enable_vlan": "EnableVLAN",
                           "vlan_id": "Id",
                           "register_with_dns": "RegisterWithDNS",
                           "use_dhcp_for_dns_domain_name": "UseDHCPForDNSDomainName",
                           "dns_name": "DnsName",
                           "dns_domain_name": "DnsDomainName",
                           "reboot_delay": "Delay"}
    if not src_dict:
        return None
    return {option_to_attribute[key]: value for key, value in src_dict.items() if value is not None}
+
+
def get_payload(module):
    """Build the four per-section REST payloads from the module parameters.

    Authentication-only options are stripped first; each remaining section is
    converted to REST attribute names via format_payload().

    :return: tuple (ipv4, ipv6, dns, vlan) of payload dicts (or None each).
    """
    params = dict(module.params)
    remove_unwanted_keys(["hostname", "username", "password", "port"], params)
    return (format_payload(params.get("ipv4_configuration", {})),
            format_payload(params.get("ipv6_configuration", {})),
            format_payload(params.get("dns_configuration", {})),
            format_payload(params.get("management_vlan", {})))
+
+
+def _compare_dict_merge(src_dict, new_dict, param_list):
+ diff = 0
+ for parm in param_list:
+ val = new_dict.get(parm)
+ if val is not None:
+ if val != src_dict.get(parm):
+ src_dict[parm] = val
+ diff += 1
+ return diff
+
+
def update_ipv4_payload(src_dict, new_dict):
    """Merge requested IPv4 settings into the current configuration in place.

    :param src_dict: current Ipv4Configuration section from the appliance.
    :param new_dict: requested attributes in REST naming (may be None).
    :return: number of attributes changed; 0 means already in desired state.
    """
    if not new_dict:
        return 0
    diff = 0
    requested_enable = new_dict.get("Enable")
    if requested_enable != src_dict.get("Enable"):  # Enable is mandatory input
        src_dict["Enable"] = requested_enable
        diff += 1
    if requested_enable:
        # Each dynamic toggle governs static attributes that only apply when
        # the toggle is off (static addressing / static DNS in use).
        dependents = {"EnableDHCP": ["StaticIPAddress", "StaticSubnetMask", "StaticGateway"],
                      "UseDHCPForDNSServerNames": ["StaticPreferredDNSServer", "StaticAlternateDNSServer"]}
        for toggle, statics in dependents.items():
            requested = new_dict.get(toggle)
            if requested is not None:
                if requested != src_dict.get(toggle):
                    src_dict[toggle] = requested
                    diff += 1
                if not requested:
                    diff += _compare_dict_merge(src_dict, new_dict, statics)
    return diff
+
+
def update_ipv6_payload(src_dict, new_dict):
    """Merge requested IPv6 settings from *new_dict* into the current
    configuration *src_dict* (modified in place).

    :param src_dict: current Ipv6Configuration section from the appliance.
    :param new_dict: requested attributes in REST naming (may be None).
    :return: number of attributes changed; 0 means already in desired state.
    """
    diff = 0
    if new_dict:
        if new_dict.get("Enable") != src_dict.get("Enable"):  # Mandatory
            src_dict["Enable"] = new_dict.get("Enable")
            diff += 1
        if new_dict.get("Enable"):
            # Each dynamic toggle governs a set of static attributes that only
            # apply when the toggle is off (static addressing / static DNS).
            tmp_dict = {"EnableAutoConfiguration": ["StaticIPAddress", "StaticPrefixLength", "StaticGateway"],
                        "UseDHCPForDNSServerNames": ["StaticPreferredDNSServer", "StaticAlternateDNSServer"]}
            for key, val in tmp_dict.items():
                if new_dict.get(key) is not None:
                    if new_dict.get(key) != src_dict.get(key):
                        src_dict[key] = new_dict.get(key)
                        diff += 1
                    # Static values are merged only when the toggle is off.
                    if not new_dict.get(key):
                        diff = diff + _compare_dict_merge(src_dict, new_dict, val)
    return diff
+
+
def update_dns_payload(src_dict, new_dict):
    """Merge requested DNS settings into the current configuration in place.

    :return: number of attributes changed; 0 means already in desired state.
    """
    if not new_dict:
        return 0
    diff = 0
    register = new_dict.get("RegisterWithDNS")
    if register is not None:
        if register != src_dict.get("RegisterWithDNS"):
            src_dict["RegisterWithDNS"] = register
            diff += 1
        # A DNS name only matters when registration is requested.
        if register is True:
            diff += _compare_dict_merge(src_dict, new_dict, ["DnsName"])
    use_dhcp = new_dict.get("UseDHCPForDNSDomainName")
    if use_dhcp is not None:
        if use_dhcp != src_dict.get("UseDHCPForDNSDomainName"):
            src_dict["UseDHCPForDNSDomainName"] = use_dhcp
            diff += 1
        # A static domain name only applies when DHCP naming is off.
        if not use_dhcp:
            diff += _compare_dict_merge(src_dict, new_dict, ["DnsDomainName"])
    return diff
+
+
def update_vlan_payload(src_dict, new_dict):
    """Merge requested management-VLAN settings into *src_dict* in place.

    :return: number of attributes changed; 0 means already in desired state.
    """
    if not new_dict:
        return 0
    diff = 0
    enable_vlan = new_dict.get("EnableVLAN")
    if enable_vlan is not None:
        if enable_vlan != src_dict.get("EnableVLAN"):
            src_dict["EnableVLAN"] = enable_vlan
            diff += 1
        # The VLAN id is only meaningful while VLAN tagging is enabled.
        if enable_vlan is True:
            diff += _compare_dict_merge(src_dict, new_dict, ["Id"])
    return diff
+
+
def get_network_config_data(rest_obj, module):
    """Fetch the current network configuration and pick the write endpoint.

    Newer OME versions expose per-adapter configurations; the adapter matching
    I(interface_name) (or the primary adapter) is returned together with the
    POST action URI. If the adapter endpoint is unavailable (HTTP error), the
    legacy single-configuration endpoint is used with PUT instead.

    :param rest_obj: open RestOME session.
    :param module: ansible module object (fails the run on a bad interface).
    :return: (current settings dict, REST method, URI)
    """
    interface = module.params.get("interface_name")
    try:
        resp = rest_obj.invoke_request("GET", JOB_IP_CONFIG)
    except HTTPError:
        # Adapter endpoint not available on this appliance version; fall back
        # to the legacy address-configuration resource.
        resp = rest_obj.invoke_request("GET", IP_CONFIG)
        return resp.json_data, "PUT", IP_CONFIG
    adapter_list = resp.json_data.get("value")
    int_adp = None
    pri_adp = None
    if adapter_list:
        for adp in adapter_list:
            if interface and adp.get("InterfaceName") == interface:
                int_adp = adp
                break
            if adp.get("PrimaryInterface"):
                pri_adp = adp
    if interface and int_adp is None:
        module.fail_json(msg="The 'interface_name' value provided {0} is invalid".format(interface))
    if int_adp:
        return int_adp, "POST", POST_IP_CONFIG
    return pri_adp, "POST", POST_IP_CONFIG
+
+
def get_updated_payload(rest_obj, module, ipv4_payload, ipv6_payload, dns_payload, vlan_payload):
    """Merge the requested settings into the current appliance configuration.

    Exits the module early when nothing would change (idempotency) and in
    check mode; otherwise returns the merged settings together with the REST
    method and URI selected by get_network_config_data().

    :return: (merged settings dict, REST method, URI)
    """
    current_setting = {}
    remove_keys = ["@odata.context", "@odata.type", "@odata.id", "CurrentSettings"]
    current_setting, rest_method, uri = get_network_config_data(rest_obj, module)
    remove_unwanted_keys(remove_keys, current_setting)
    # Each section pairs the requested payload with its merge helper.
    payload_dict = {"Ipv4Configuration": [ipv4_payload, update_ipv4_payload],
                    "Ipv6Configuration": [ipv6_payload, update_ipv6_payload],
                    "DnsConfiguration": [dns_payload, update_dns_payload],
                    "ManagementVLAN": [vlan_payload, update_vlan_payload]}
    diff = 0
    enable_nic = module.params.get("enable_nic")
    if current_setting.get("EnableNIC") != enable_nic:
        current_setting["EnableNIC"] = enable_nic
        diff += 1
    # Section payloads only matter while the NIC itself is enabled.
    if enable_nic:
        for config, pload in payload_dict.items():
            if pload[0]:
                diff = diff + pload[1](current_setting.get(config), pload[0])
    delay = module.params.get("reboot_delay")
    if delay is not None:
        if current_setting["Delay"] != delay:
            current_setting["Delay"] = delay
            # NOTE(review): diff is not incremented here, so a changed
            # reboot_delay alone reports "no changes" — confirm intended.
    if diff == 0:
        module.exit_json(
            msg=NO_CHANGES_FOUND, network_configuration=current_setting)
    if module.check_mode:
        module.exit_json(changed=True, msg=CHANGES_FOUND)
    return current_setting, rest_method, uri
+
+
def validate_ipaddress(module, ip_type, config, var_list, ip_func):
    """Fail the module when any listed address option fails *ip_func*.

    :param module: ansible module object.
    :param ip_type: label used in the error message ("IPv4"/"IPv6").
    :param config: name of the suboption dict in module.params.
    :param var_list: option names within that dict to validate.
    :param ip_func: predicate returning True for a valid address.
    """
    settings = module.params.get(config)
    if not settings:
        return
    for option_name in var_list:
        value = settings.get(option_name)
        if value and not ip_func(value):
            module.fail_json(msg="Invalid {0} address provided for the {1}".format(ip_type, option_name))
+
+
def validate_input(module):
    """Validate all address options and the reboot delay before any API call."""
    common = ["static_ip_address", "static_gateway",
              "static_preferred_dns_server", "static_alternate_dns_server"]
    validate_ipaddress(module, "IPv6", "ipv6_configuration", common, validate_ip_v6_address)
    # IPv4 additionally carries a subnet mask option.
    validate_ipaddress(module, "IPv4", "ipv4_configuration",
                       common + ["static_subnet_mask"], validate_ip_address)
    delay = module.params.get("reboot_delay")
    if delay is not None and delay < 0:
        module.fail_json(msg="Invalid value provided for 'reboot_delay'")
+
+
def main():
    """Module entry point: build the argument spec, validate the input, and
    apply the network address configuration via the appropriate endpoint."""
    # Suboption specs for each configuration section.
    ipv4_options = {"enable": {"required": True, "type": "bool"},
                    "enable_dhcp": {"required": False, "type": "bool"},
                    "static_ip_address": {"required": False, "type": "str"},
                    "static_subnet_mask": {"required": False, "type": "str"},
                    "static_gateway": {"required": False, "type": "str"},
                    "use_dhcp_for_dns_server_names": {"required": False, "type": "bool"},
                    "static_preferred_dns_server": {"required": False, "type": "str"},
                    "static_alternate_dns_server": {"required": False, "type": "str"}}
    ipv6_options = {"enable": {"required": True, "type": "bool"},
                    "enable_auto_configuration": {"required": False, "type": "bool"},
                    "static_ip_address": {"required": False, "type": "str"},
                    "static_prefix_length": {"required": False, "type": "int"},
                    "static_gateway": {"required": False, "type": "str"},
                    "use_dhcp_for_dns_server_names": {"required": False, "type": "bool"},
                    "static_preferred_dns_server": {"required": False, "type": "str"},
                    "static_alternate_dns_server": {"required": False, "type": "str"}}
    dns_options = {"register_with_dns": {"required": False, "type": "bool"},
                   "use_dhcp_for_dns_domain_name": {"required": False, "type": "bool"},
                   "dns_name": {"required": False, "type": "str"},
                   "dns_domain_name": {"required": False, "type": "str"}}
    management_vlan = {"enable_vlan": {"required": True, "type": "bool"},
                       "vlan_id": {"required": False, "type": "int"}}

    # Suboption-level required_if: static values are required when the
    # corresponding DHCP/autoconfig toggle is off, and vice versa.
    specs = {
        "enable_nic": {"required": False, "type": "bool", "default": True},
        "interface_name": {"required": False, "type": "str"},
        "ipv4_configuration":
            {"required": False, "type": "dict", "options": ipv4_options,
             "required_if": [
                 ['enable', True, ('enable_dhcp',), True],
                 ['enable_dhcp', False, ('static_ip_address', 'static_subnet_mask', "static_gateway"), False],
                 ['use_dhcp_for_dns_server_names', False,
                  ('static_preferred_dns_server', 'static_alternate_dns_server'), True]
             ]
             },
        "ipv6_configuration":
            {"required": False, "type": "dict", "options": ipv6_options,
             "required_if": [
                 ['enable', True, ('enable_auto_configuration',), True],
                 ['enable_auto_configuration', False, ('static_ip_address', 'static_prefix_length', "static_gateway"),
                  False],
                 ['use_dhcp_for_dns_server_names', False,
                  ('static_preferred_dns_server', 'static_alternate_dns_server'), True]
             ]
             },
        "dns_configuration":
            {"required": False, "type": "dict", "options": dns_options,
             "required_if": [
                 ['register_with_dns', True, ('dns_name',), False],
                 ['use_dhcp_for_dns_domain_name', False, ('dns_domain_name',)]
             ]
             },
        "management_vlan":
            {"required": False, "type": "dict", "options": management_vlan,
             "required_if": [
                 ['enable_vlan', True, ('vlan_id',), True]
             ]
             },
        "reboot_delay": {"required": False, "type": "int"}
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[
            ["enable_nic", True,
             ("ipv4_configuration", "ipv6_configuration", "dns_configuration", "management_vlan"), True]
        ],
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            validate_input(module)
            ipv4_payload, ipv6_payload, dns_payload, vlan_payload = get_payload(module)
            updated_payload, rest_method, uri = get_updated_payload(
                rest_obj, module, ipv4_payload, ipv6_payload, dns_payload, vlan_payload)
            resp = rest_obj.invoke_request(rest_method, uri, data=updated_payload, api_timeout=150)
            # POST (adapter endpoint) triggers a job; PUT applies directly.
            if rest_method == "POST":
                module.exit_json(msg="Successfully triggered job to update network address configuration.",
                                 network_configuration=updated_payload, job_info=resp.json_data, changed=True)
            module.exit_json(msg="Successfully triggered task to update network address configuration.",
                             network_configuration=resp.json_data, changed=True)
    except HTTPError as err:
        # HTTPError is file-like, so its JSON error body can be read.
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))
    except Exception as err:
        module.fail_json(msg=str(err))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py
new file mode 100644
index 000000000..3659d8a3d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_application_network_proxy
+short_description: Updates the proxy configuration on OpenManage Enterprise
+version_added: "2.1.0"
+description: This module allows to configure a network proxy on OpenManage Enterprise.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ enable_proxy:
+ description:
+ - Enables or disables the HTTP proxy configuration.
+    - If I(enable_proxy) is false, then the HTTP proxy configuration is set to its default value.
+ required: true
+ type: bool
+ ip_address:
+ description:
+ - Proxy server address.
+ - This option is mandatory when I(enable_proxy) is true.
+ type: str
+ proxy_port:
+ description:
+ - Proxy server's port number.
+ - This option is mandatory when I(enable_proxy) is true.
+ type: int
+ enable_authentication:
+ description:
+ - Enable or disable proxy authentication.
+ - If I(enable_authentication) is true, I(proxy_username) and I(proxy_password) must be provided.
+    - If I(enable_authentication) is false, the proxy username and password are set to their default values.
+ type: bool
+ proxy_username:
+ description:
+ - Proxy server username.
+ - This option is mandatory when I(enable_authentication) is true.
+ type: str
+ proxy_password:
+ description:
+ - Proxy server password.
+ - This option is mandatory when I(enable_authentication) is true.
+ type: str
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Sajna Shetty(@Sajna-Shetty)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Update proxy configuration and enable authentication
+ dellemc.openmanage.ome_application_network_proxy:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_proxy: true
+ ip_address: "192.168.0.2"
+ proxy_port: 444
+ enable_authentication: true
+ proxy_username: "proxy_username"
+ proxy_password: "proxy_password"
+
+- name: Reset proxy authentication
+ dellemc.openmanage.ome_application_network_proxy:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_proxy: true
+ ip_address: "192.168.0.2"
+ proxy_port: 444
+ enable_authentication: false
+
+- name: Reset proxy configuration
+ dellemc.openmanage.ome_application_network_proxy:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_proxy: false
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall status of the network proxy configuration change.
+ returned: always
+ sample: "Successfully updated network proxy configuration."
+proxy_configuration:
+ type: dict
+ description: Updated application network proxy configuration.
+ returned: success
+ sample: {
+ "EnableAuthentication": true,
+ "EnableProxy": true,
+ "IpAddress": "192.168.0.2",
+ "Password": null,
+ "PortNumber": 444,
+ "Username": "root"
+ }
+error_info:
+ description: Details of the HTTP error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to complete the request because the input value
+ for PortNumber is missing or an invalid value is entered.",
+ "MessageArgs": [
+ "PortNumber"
+ ],
+ "MessageId": "CGEN6002",
+ "RelatedProperties": [],
+ "Resolution": "Enter a valid value and retry the operation.",
+ "Severity": "Critical"
+ }
+ ],
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information."
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+
+PROXY_CONFIG = "ApplicationService/Network/ProxyConfiguration"
+CHECK_MODE_CHANGE_FOUND_MSG = "Changes found to be applied."
+CHECK_MODE_CHANGE_NOT_FOUND_MSG = "No Changes found to be applied."
+
+
def remove_unwanted_keys(key_list, payload):
    """Delete every key in *key_list* from *payload* in place.

    Missing keys are ignored. Uses a plain loop rather than a list
    comprehension executed only for its side effect (idiom fix).
    """
    for key in key_list:
        payload.pop(key, None)
+
+
def validate_check_mode_for_network_proxy(payload_diff, module):
    """Exit early in check mode, reporting whether changes would be applied.

    :param payload_diff: truthy when the requested settings differ from the
        current configuration.
    :param module: ansible module object.
    :return: None (exits the module when check mode is active).
    """
    if not module.check_mode:
        return
    if payload_diff:
        module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True)
    module.exit_json(msg=CHECK_MODE_CHANGE_NOT_FOUND_MSG, changed=False)
+
+
def get_payload(module):
    """Build the proxy PUT payload from the module parameters.

    Authentication/connection options are stripped first; remaining options
    are mapped to their REST attribute names and None values are dropped.

    :return: dict keyed by REST attribute names.
    """
    proxy_payload_map = {
        "ip_address": "IpAddress",
        "proxy_port": "PortNumber",
        "enable_proxy": "EnableProxy",
        "proxy_username": "Username",
        "proxy_password": "Password",
        "enable_authentication": "EnableAuthentication"
    }
    settings = dict(module.params)
    remove_unwanted_keys(
        ["hostname", "username", "password", "port", "ca_path", "validate_certs", "timeout"],
        settings)
    return {proxy_payload_map[key]: value for key, value in settings.items() if value is not None}
+
+
def get_updated_payload(rest_obj, module, payload):
    """Fetch the current proxy configuration and merge the requested changes.

    Exits early (via exit_json) when nothing would change; check mode is
    handled by validate_check_mode_for_network_proxy().

    :param rest_obj: open RestOME session.
    :param module: ansible module object.
    :param payload: requested attributes in REST naming (from get_payload).
    :return: merged configuration dict to PUT back to the appliance.
    """
    current_setting = {}
    if not any(payload):
        module.fail_json(msg="Unable to configure the proxy because proxy configuration settings are not provided.")
    else:
        params = module.params
        # Strip OData metadata; Password is never returned by the GET, so it
        # cannot take part in the comparison below.
        remove_keys = ["@odata.context", "@odata.type", "@odata.id", "Password"]
        enable_authentication = params.get("enable_authentication")
        if enable_authentication is False:
            """when enable auth is disabled, ignore proxy username and password """
            remove_keys.append("Username")
            payload.pop('Username', None)
            payload.pop('Password', None)
        resp = rest_obj.invoke_request("GET", PROXY_CONFIG)
        current_setting = resp.json_data
        remove_unwanted_keys(remove_keys, current_setting)
        # NOTE(review): keys absent from current_setting (e.g. Password) never
        # count as a difference, so a password-only change reports "no
        # changes" — confirm this is the intended idempotency behaviour.
        diff = any(key in current_setting and val != current_setting[key] for key, val in payload.items())
        validate_check_mode_for_network_proxy(diff, module)
        if not diff:
            module.exit_json(msg="No changes made to proxy configuration as entered values are the same as current "
                                 "configuration values.")
        else:
            current_setting.update(payload)
    return current_setting
+
+
def main():
    """Module entry point: validate parameters and apply the proxy settings."""
    specs = {
        "ip_address": {"required": False, "type": "str"},
        "proxy_port": {"required": False, "type": "int"},
        "enable_proxy": {"required": True, "type": "bool"},
        "proxy_username": {"required": False, "type": "str"},
        "proxy_password": {"required": False, "type": "str", "no_log": True},
        "enable_authentication": {"required": False, "type": "bool"},
    }
    specs.update(ome_auth_params)
    # Address/port are required to enable the proxy; credentials are required
    # to enable authentication.
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[['enable_proxy', True, ['ip_address', 'proxy_port']],
                     ['enable_authentication', True, ['proxy_username', 'proxy_password']], ],
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            payload = get_payload(module)
            updated_payload = get_updated_payload(rest_obj, module, payload)
            resp = rest_obj.invoke_request("PUT", PROXY_CONFIG, data=updated_payload)
            module.exit_json(msg="Successfully updated network proxy configuration.",
                             proxy_configuration=resp.json_data,
                             changed=True)
    except HTTPError as err:
        # HTTPError is file-like, so its JSON error body can be read.
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))
    except Exception as err:
        module.fail_json(msg=str(err))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py
new file mode 100644
index 000000000..2dfd13a58
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_application_network_settings
+short_description: This module allows you to configure the session inactivity timeout settings
+version_added: "4.4.0"
+description:
+ - This module allows you to configure the session inactivity timeout settings on OpenManage Enterprise
+ and OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ session_inactivity_timeout:
+ description: Session inactivity timeout settings.
+ type: dict
+ suboptions:
+ enable_universal_timeout:
+ description:
+ - Enable or disable the universal inactivity timeout.
+ type: bool
+ universal_timeout:
+ description:
+ - Duration of inactivity in minutes after which all sessions end.
+ - This is applicable when I(enable_universal_timeout) is C(true).
+ - This is mutually exclusive with I(api_timeout), I(gui_timeout), I(ssh_timeout) and I(serial_timeout).
+ type: float
+ api_timeout:
+ description:
+ - Duration of inactivity in minutes after which the API session ends.
+ - This is mutually exclusive with I(universal_timeout).
+ type: float
+ api_sessions:
+ description:
+ - The maximum number of API sessions to be allowed.
+ type: int
+ gui_timeout:
+ description:
+ - Duration of inactivity in minutes after which the web interface of
+ Graphical User Interface (GUI) session ends.
+ - This is mutually exclusive with I(universal_timeout).
+ type: float
+ gui_sessions:
+ description:
+ - The maximum number of GUI sessions to be allowed.
+ type: int
+ ssh_timeout:
+ description:
+ - Duration of inactivity in minutes after which the SSH session ends.
+ - This is applicable only for OpenManage Enterprise Modular.
+ - This is mutually exclusive with I(universal_timeout).
+ type: float
+ ssh_sessions:
+ description:
+ - The maximum number of SSH sessions to be allowed.
+ - This is applicable to OME-M only.
+ type: int
+ serial_timeout:
+ description:
+ - Duration of inactivity in minutes after which the serial console session ends.
+ - This is applicable only for OpenManage Enterprise Modular.
+ - This is mutually exclusive with I(universal_timeout).
+ type: float
+ serial_sessions:
+ description:
+ - The maximum number of serial console sessions to be allowed.
+ - This is applicable only for OpenManage Enterprise Modular.
+ type: int
+requirements:
+ - "python >= 3.8.6"
+notes:
+ - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise
+ or OpenManage Enterprise Modular.
+ - To configure other network settings such as network address, web server, and so on, refer to the respective
+ OpenManage Enterprise application network setting modules.
+ - This module supports C(check_mode).
+author:
+ - Sachin Apagundi(@sachin-apa)
+'''
+
+EXAMPLES = """
+---
+- name: Configure universal inactivity timeout
+ ome_application_network_settings:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ session_inactivity_timeout:
+ enable_universal_timeout: true
+ universal_timeout: 30
+ api_sessions: 90
+ gui_sessions: 5
+ ssh_sessions: 2
+ serial_sessions: 1
+
+- name: Configure API and GUI timeout and sessions
+ ome_application_network_settings:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ session_inactivity_timeout:
+ api_timeout: 20
+ api_sessions: 100
+ gui_timeout: 25
+ gui_sessions: 5
+
+- name: Configure timeout and sessions for all parameters
+ ome_application_network_settings:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ session_inactivity_timeout:
+ api_timeout: 20
+ api_sessions: 100
+ gui_timeout: 15
+ gui_sessions: 5
+ ssh_timeout: 30
+ ssh_sessions: 2
+ serial_timeout: 35
+ serial_sessions: 1
+
+- name: Disable universal timeout and configure timeout and sessions for other parameters
+ ome_application_network_settings:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ session_inactivity_timeout:
+ enable_universal_timeout: false
+ api_timeout: 20
+ api_sessions: 100
+ gui_timeout: 15
+ gui_sessions: 5
+ ssh_timeout: 30
+ ssh_sessions: 2
+ serial_timeout: 35
+ serial_sessions: 1
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall status of the Session timeout settings.
+ returned: always
+ sample: "Successfully updated the session timeout settings."
+session_inactivity_setting:
+ type: dict
+ description: Returned when session inactivity timeout settings are updated successfully.
+ returned: success
+ sample: [
+ {
+ "SessionType": "API",
+ "MaxSessions": 32,
+ "SessionTimeout": 99600,
+ "MinSessionTimeout": 60000,
+ "MaxSessionTimeout": 86400000,
+ "MinSessionsAllowed": 1,
+ "MaxSessionsAllowed": 100,
+ "MaxSessionsConfigurable": true,
+ "SessionTimeoutConfigurable": true
+ },
+ {
+ "SessionType": "GUI",
+ "MaxSessions": 6,
+ "SessionTimeout": 99600,
+ "MinSessionTimeout": 60000,
+ "MaxSessionTimeout": 7200000,
+ "MinSessionsAllowed": 1,
+ "MaxSessionsAllowed": 6,
+ "MaxSessionsConfigurable": true,
+ "SessionTimeoutConfigurable": true
+ },
+ {
+ "SessionType": "SSH",
+ "MaxSessions": 4,
+ "SessionTimeout": 99600,
+ "MinSessionTimeout": 60000,
+ "MaxSessionTimeout": 10800000,
+ "MinSessionsAllowed": 1,
+ "MaxSessionsAllowed": 4,
+ "MaxSessionsConfigurable": true,
+ "SessionTimeoutConfigurable": true
+ },
+ {
+ "SessionType": "Serial",
+ "MaxSessions": 1,
+ "SessionTimeout": 99600,
+ "MinSessionTimeout": 60000,
+ "MaxSessionTimeout": 86400000,
+ "MinSessionsAllowed": 1,
+ "MaxSessionsAllowed": 1,
+ "MaxSessionsConfigurable": false,
+ "SessionTimeoutConfigurable": true
+ },
+ {
+ "SessionType": "UniversalTimeout",
+ "MaxSessions": 0,
+ "SessionTimeout": -1,
+ "MinSessionTimeout": -1,
+ "MaxSessionTimeout": 86400000,
+ "MinSessionsAllowed": 0,
+ "MaxSessionsAllowed": 0,
+ "MaxSessionsConfigurable": false,
+ "SessionTimeoutConfigurable": true
+ }
+ ]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CUSR1233",
+ "RelatedProperties": [],
+ "Message": "The number of allowed concurrent sessions for API must be between 1 and 100 sessions.",
+ "MessageArgs": [
+ "API",
+ "1",
+ "100"
+ ],
+ "Severity": "Critical",
+ "Resolution": "Enter values in the correct range and retry the operation."
+ }
+ ]
+ }
+ }
+"""
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+
+SUCCESS_MSG = "Successfully updated the session timeout settings."
+SESSION_INACTIVITY_GET = "SessionService/SessionConfiguration"
+SESSION_INACTIVITY_POST = "SessionService/Actions/SessionService.SessionConfigurationUpdate"
+NO_CHANGES = "No changes found to be applied."
+CHANGES_FOUND = "Changes found to be applied."
+session_type_map = {
+ "UniversalTimeout": {"SessionTimeout": "universal_timeout", "MaxSessions": None},
+ "API": {"SessionTimeout": "api_timeout", "MaxSessions": "api_sessions"},
+ "GUI": {"SessionTimeout": "gui_timeout", "MaxSessions": "gui_sessions"},
+ "SSH": {"SessionTimeout": "ssh_timeout", "MaxSessions": "ssh_sessions"},
+ "Serial": {"SessionTimeout": "serial_timeout", "MaxSessions": "serial_sessions"}
+}
+
+
def fetch_session_inactivity_settings(rest_obj):
    """Return the list of current per-session-type configuration entries."""
    response = rest_obj.invoke_request("GET", SESSION_INACTIVITY_GET)
    return response.json_data.get('value')
+
+
def update_session_inactivity_settings(rest_obj, payload):
    """POST the updated session configuration and return the raw response."""
    return rest_obj.invoke_request("POST", SESSION_INACTIVITY_POST, data=payload)
+
+
def update_payload(module, curr_payload):
    """Overlay the requested session timeout settings onto the current ones.

    :param module: ansible module object.
    :param curr_payload: per-session-type dicts from the GET call; mutated
        in place.
    :return: tuple (payload, diff) where diff counts changed attributes.
    """
    diff = 0
    sit_param = module.params.get("session_inactivity_timeout").copy()
    eut = sit_param.get("enable_universal_timeout")
    eut_enabled = is_universal_timeout_enabled(curr_payload)
    if eut is False:
        sit_param["universal_timeout"] = -1  # to disable universal timeout set value to -1
    for up in curr_payload:
        stm = session_type_map.get(up.get("SessionType"), None)
        # Skip unknown session types; also skip the UniversalTimeout entry
        # entirely when the user expressed no choice about it (eut is None).
        if stm and not ((up.get("SessionType") == "UniversalTimeout") and (eut is None)):
            sess_time = get_value(sit_param, up, stm.get("SessionTimeout", None), "SessionTimeout")
            # Per-type timeout changes only count while the universal timeout
            # is not in force, or when it is being toggled in this run.
            if sess_time != up.get("SessionTimeout") and ((not eut_enabled) or eut is not None):
                diff += 1
                up["SessionTimeout"] = sess_time
            max_sess = get_value(sit_param, up, stm.get("MaxSessions", None), "MaxSessions")
            if max_sess != up.get("MaxSessions"):
                diff += 1
                up["MaxSessions"] = max_sess
    return curr_payload, diff
+
+
def is_universal_timeout_enabled(payload):
    """Return True when the current settings carry a positive universal timeout.

    A missing UniversalTimeout entry, or a SessionTimeout of -1 (the disabled
    sentinel), counts as not enabled.
    """
    timeout = next((entry.get("SessionTimeout") for entry in payload
                    if entry.get("SessionType") == "UniversalTimeout"), -1)
    return timeout > 0
+
+
def get_value(input_module, resp, mod_key, attr_key):
    """Select the requested value for one attribute, defaulting to current.

    Timeouts supplied by the user are in minutes and are converted to
    milliseconds; the -1 disable sentinel is passed through unchanged.

    :param input_module: user-requested options (snake_case keys).
    :param resp: current settings entry for this session type.
    :param mod_key: option name to read from input_module (may be None).
    :param attr_key: REST attribute name to fall back to in resp.
    """
    requested = input_module.get(mod_key)
    if requested is None:
        return resp.get(attr_key)
    if attr_key == "SessionTimeout" and requested != -1:
        requested = requested * 60000
    return requested
+
+
def process_check_mode(module, diff):
    """Handle the no-change and check-mode exits before applying settings."""
    if diff:
        # Check mode: report that changes would be made, without applying.
        if module.check_mode:
            module.exit_json(msg=CHANGES_FOUND, changed=True)
    else:
        # Idempotent: device already matches the request.
        module.exit_json(msg=NO_CHANGES)
+
+
def main():
    """Module entry point: configure session inactivity timeout settings."""
    session_inactivity_options = {
        "enable_universal_timeout": {"type": "bool", "required": False},
        "universal_timeout": {"type": "float", "required": False},
        "api_timeout": {"type": "float", "required": False},
        "api_sessions": {"type": "int", "required": False},
        "gui_timeout": {"type": "float", "required": False},
        "gui_sessions": {"type": "int", "required": False},
        "ssh_timeout": {"type": "float", "required": False},
        "ssh_sessions": {"type": "int", "required": False},
        "serial_timeout": {"type": "float", "required": False},
        "serial_sessions": {"type": "int", "required": False},
    }
    specs = {
        "session_inactivity_timeout": {
            "required": False,
            "type": "dict",
            "options": session_inactivity_options,
            # The universal timeout supersedes all per-type timeouts.
            "mutually_exclusive": [
                ['universal_timeout', 'api_timeout'],
                ['universal_timeout', 'gui_timeout'],
                ['universal_timeout', 'ssh_timeout'],
                ['universal_timeout', 'serial_timeout']
            ],
            "required_if": [
                ['enable_universal_timeout', True, ['universal_timeout']]
            ]
        }
    }
    specs.update(ome_auth_params)

    module = AnsibleModule(
        argument_spec=specs,
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            curr_resp = fetch_session_inactivity_settings(rest_obj)
            payload, diff = update_payload(module, curr_resp)
            process_check_mode(module, diff)
            resp = update_session_inactivity_settings(rest_obj, payload)
            module.exit_json(msg=SUCCESS_MSG,
                             session_inactivity_setting=resp.json_data, changed=True)

    except HTTPError as err:
        # HTTPError is file-like, so its JSON error body can be read.
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (
            IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError,
            OSError) as err:
        # BUGFIX: these exceptions are not file-like; the previous
        # json.load(err) here raised AttributeError inside the handler and
        # masked the original failure. Sibling modules report only str(err).
        module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py
new file mode 100644
index 000000000..381ef3191
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py
@@ -0,0 +1,264 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_application_network_time
+short_description: Updates the network time on OpenManage Enterprise
+version_added: "2.1.0"
+description: This module allows the configuration of network time on OpenManage Enterprise.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ enable_ntp:
+ description:
+ - Enables or disables Network Time Protocol (NTP).
+ - If I(enable_ntp) is false, then the NTP addresses reset to their default values.
+ required: true
+ type: bool
+ system_time:
+ description:
+ - Time in the current system.
+ - This option is only applicable when I(enable_ntp) is false.
+ - This option must be provided in the following format 'yyyy-mm-dd hh:mm:ss'.
+ type: str
+ time_zone:
+ description:
+ - The valid timezone ID to be used.
+ - This option is applicable for both system time and NTP time synchronization.
+ type: str
+ primary_ntp_address:
+ description:
+ - The primary NTP address.
+ - This option is applicable when I(enable_ntp) is true.
+ type: str
+ secondary_ntp_address1:
+ description:
+ - The first secondary NTP address.
+ - This option is applicable when I(enable_ntp) is true.
+ type: str
+ secondary_ntp_address2:
+ description:
+ - The second secondary NTP address.
+ - This option is applicable when I(enable_ntp) is true.
+ type: str
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Sajna Shetty(@Sajna-Shetty)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Configure system time
+ dellemc.openmanage.ome_application_network_time:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_ntp: false
+ system_time: "2020-03-31 21:35:18"
+ time_zone: "TZ_ID_11"
+
+- name: Configure NTP server for time synchronization
+ dellemc.openmanage.ome_application_network_time:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_ntp: true
+ time_zone: "TZ_ID_66"
+ primary_ntp_address: "192.168.0.2"
+ secondary_ntp_address1: "192.168.0.3"
+ secondary_ntp_address2: "192.168.0.4"
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall status of the network time configuration change.
+ returned: always
+ sample: "Successfully configured network time."
+time_configuration:
+ type: dict
+ description: Updated application network time configuration.
+ returned: success
+ sample: {
+ "EnableNTP": false,
+ "JobId": null,
+ "PrimaryNTPAddress": null,
+ "SecondaryNTPAddress1": null,
+ "SecondaryNTPAddress2": null,
+ "SystemTime": null,
+ "TimeSource": "Local Clock",
+ "TimeZone": "TZ_ID_1",
+ "TimeZoneIdLinux": null,
+ "TimeZoneIdWindows": null,
+ "UtcTime": null
+ }
+error_info:
+ description: Details of the HTTP error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to complete the request because the input value
+ for SystemTime is missing or an invalid value is entered.",
+ "MessageArgs": [
+ "SystemTime"
+ ],
+ "MessageId": "CGEN6002",
+ "RelatedProperties": [],
+ "Resolution": "Enter a valid value and retry the operation.",
+ "Severity": "Critical"
+ }
+ ],
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information."
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+
+TIME_CONFIG = "ApplicationService/Network/TimeConfiguration"
+TIME_ZONE = "ApplicationService/Network/TimeZones"
+
+
+def remove_unwanted_keys(key_list, payload):
+ [payload.pop(key) for key in key_list if key in payload]
+
+
+def get_payload(module):
+ params = module.params
+ proxy_payload_map = {
+ "enable_ntp": "EnableNTP",
+ "time_zone": "TimeZone",
+ "system_time": "SystemTime",
+ "primary_ntp_address": "PrimaryNTPAddress",
+ "secondary_ntp_address1": "SecondaryNTPAddress1",
+ "secondary_ntp_address2": "SecondaryNTPAddress2"
+ }
+ backup_params = params.copy()
+ remove_keys = ["hostname", "username", "password", "port", "ca_path", "validate_certs", "timeout"]
+ remove_unwanted_keys(remove_keys, backup_params)
+ payload = dict([(proxy_payload_map[key], val) for key, val in backup_params.items() if val is not None])
+ return payload
+
+
+def update_time_config_output(back_up_settings):
+ remove_keys = ["@odata.context", "@odata.type", "@odata.id"]
+ remove_unwanted_keys(remove_keys, back_up_settings)
+ back_up_settings.update({"JobId": None})
+
+
+def get_updated_payload(rest_obj, module, payload):
+ remove_keys = ["@odata.context", "@odata.type", "@odata.id", "TimeZoneIdLinux", "TimeZoneIdWindows", "TimeSource", "UtcTime"]
+ resp = rest_obj.invoke_request("GET", TIME_CONFIG, api_timeout=150)
+ current_setting = resp.json_data
+ back_up_settings = current_setting.copy()
+ remove_unwanted_keys(remove_keys, current_setting)
+ diff = any(key in current_setting and val != current_setting[key] for key, val in payload.items())
+ if module.check_mode:
+ if diff:
+ module.exit_json(changed=True, msg="Changes found to be applied to the time configuration.")
+ else:
+ module.exit_json(changed=False, msg="No changes found to be applied to the time configuration.")
+ else:
+ if diff:
+ current_setting.update(payload)
+ else:
+ update_time_config_output(back_up_settings)
+ module.exit_json(changed=False, msg="No changes made to the time configuration as the entered"
+ " values are the same as the current configuration.", time_configuration=back_up_settings)
+ return current_setting
+
+
+def validate_time_zone(module, rest_obj):
+ params = module.params
+ time_zone = params.get("time_zone", None)
+ if time_zone is not None:
+ time_zone_resp = rest_obj.invoke_request("GET", TIME_ZONE)
+ time_zone_val = time_zone_resp.json_data["value"]
+ time_id_list = [time_dict["Id"] for time_dict in time_zone_val]
+ if time_zone not in time_id_list:
+ sorted_time_id_list = sorted(time_id_list, key=lambda time_id: [int(i) for i in time_id.split("_") if i.isdigit()])
+ module.fail_json(msg="Provide valid time zone.Choices are {0}".format(",".join(sorted_time_id_list)))
+
+
+def validate_input(module):
+ system_time = module.params.get("system_time")
+ enable_ntp = module.params["enable_ntp"]
+ primary_ntp_address = module.params.get("primary_ntp_address")
+ secondary_ntp_address1 = module.params.get("secondary_ntp_address1")
+ secondary_ntp_address2 = module.params.get("secondary_ntp_address2")
+ if enable_ntp is True and system_time is not None:
+ module.fail_json(msg="When enable NTP is true,the option system time is not accepted.")
+ if enable_ntp is False and any([primary_ntp_address, secondary_ntp_address1, secondary_ntp_address2]):
+ module.fail_json(msg="When enable NTP is false,the option(s) primary_ntp_address, secondary_ntp_address1 and secondary_ntp_address2 is not accepted.")
+
+
+def main():
+ specs = {
+ "enable_ntp": {"required": True, "type": "bool"},
+ "time_zone": {"required": False, "type": "str"},
+ "system_time": {"required": False, "type": "str"},
+ "primary_ntp_address": {"required": False, "type": "str"},
+ "secondary_ntp_address1": {"required": False, "type": "str"},
+ "secondary_ntp_address2": {"required": False, "type": "str"},
+ }
+ specs.update(ome_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ required_if=[['enable_ntp', False, ('time_zone', 'system_time',), True],
+ ['enable_ntp', True, ('time_zone', 'primary_ntp_address',
+ 'secondary_ntp_address1', 'secondary_ntp_address2'), True]],
+ mutually_exclusive=[['system_time', 'primary_ntp_address'],
+ ['system_time', 'secondary_ntp_address1'],
+ ['system_time', 'secondary_ntp_address2']],
+ supports_check_mode=True,
+ )
+ try:
+ validate_input(module)
+ with RestOME(module.params, req_session=False) as rest_obj:
+ validate_time_zone(module, rest_obj)
+ payload = get_payload(module)
+ updated_payload = get_updated_payload(rest_obj, module, payload)
+ resp = rest_obj.invoke_request("PUT", TIME_CONFIG, data=updated_payload, api_timeout=150)
+ module.exit_json(msg="Successfully configured network time.", time_configuration=resp.json_data,
+ changed=True)
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (IOError, ValueError, SSLError, TypeError, ConnectionError, SSLValidationError, OSError) as err:
+ module.fail_json(msg=str(err))
+ except Exception as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py
new file mode 100644
index 000000000..adee29dc6
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 6.1.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_application_network_webserver
+short_description: Updates the Web server configuration on OpenManage Enterprise
+version_added: "2.1.0"
+description: This module allows to configure a network web server on OpenManage Enterprise.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ webserver_port:
+ description:
+ - Port number used by OpenManage Enterprise to establish a secure server connection.
+ - "I(WARNING) A change in port number results in a loss of connectivity in the current session
+ for more than a minute."
+ type: int
+ webserver_timeout:
+ description:
+ - The duration in minutes after which a web user interface session is automatically disconnected.
+ - If a change is made to the session timeout, it will only take effect after the next log in.
+ type: int
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Jagadeesh N V(@jagadeeshnv)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Update web server port and session time out
+ dellemc.openmanage.ome_application_network_webserver:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ webserver_port: 9443
+ webserver_timeout: 20
+
+- name: Update session time out
+ dellemc.openmanage.ome_application_network_webserver:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ webserver_timeout: 30
+
+- name: Update web server port
+ dellemc.openmanage.ome_application_network_webserver:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ webserver_port: 8443
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall status of the network web server configuration change.
+ returned: always
+ sample: "Successfully updated network web server configuration."
+webserver_configuration:
+ type: dict
+ description: Updated application network web server configuration.
+ returned: success
+ sample: {
+ "TimeOut": 20,
+ "PortNumber": 443,
+ "EnableWebServer": true
+ }
+error_info:
+ description: Details of the HTTP error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to complete the request because the input value
+ for PortNumber is missing or an invalid value is entered.",
+ "MessageArgs": [
+ "PortNumber"
+ ],
+ "MessageId": "CGEN6002",
+ "RelatedProperties": [],
+ "Resolution": "Enter a valid value and retry the operation.",
+ "Severity": "Critical"
+ }
+ ],
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information."
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+
+WEBSERVER_CONFIG = "ApplicationService/Network/WebServerConfiguration"
+
+
+def get_updated_payload(rest_obj, module):
+ params = module.params
+ resp = rest_obj.invoke_request("GET", WEBSERVER_CONFIG)
+ current_setting = resp.json_data
+ port_changed = 0
+ # Remove odata keys ["@odata.context", "@odata.type", "@odata.id"]
+ cp = current_setting.copy()
+ klist = cp.keys()
+ for k in klist:
+ if str(k).lower().startswith('@odata'):
+ current_setting.pop(k)
+ diff = 0
+ webserver_payload_map = {
+ "webserver_port": "PortNumber",
+ "webserver_timeout": "TimeOut",
+ }
+ for config, pload in webserver_payload_map.items():
+ pval = params.get(config)
+ if pval is not None:
+ if current_setting.get(pload) != pval:
+ current_setting[pload] = pval
+ if pload == "PortNumber":
+ port_changed = pval
+ diff += 1
+ if diff == 0: # Idempotency
+ if module.check_mode:
+ module.exit_json(msg="No changes found to be applied to the web server.")
+ module.exit_json(
+ msg="No changes made to the web server configuration as the entered"
+ " values are the same as the current configuration.", webserver_configuration=current_setting)
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Changes found to be applied to the web server.")
+ return current_setting, port_changed
+
+
+def main():
+ specs = {
+ "webserver_port": {"required": False, "type": "int"},
+ "webserver_timeout": {"required": False, "type": "int"},
+ }
+ specs.update(ome_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ required_one_of=[["webserver_port", "webserver_timeout"]],
+ supports_check_mode=True
+ )
+
+ port_change = False
+ try:
+ with RestOME(module.params, req_session=False) as rest_obj:
+ updated_payload, port_change = get_updated_payload(rest_obj, module)
+ msg = "Successfully updated network web server configuration."
+ resp = rest_obj.invoke_request("PUT", WEBSERVER_CONFIG, data=updated_payload)
+ module.exit_json(msg=msg, webserver_configuration=resp.json_data, changed=True)
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except SSLError as err:
+ if port_change:
+ module.exit_json(msg="{0} Port has changed to {1}.".format(msg, port_change),
+ webserver_configuration=updated_payload, changed=True)
+ else:
+ module.fail_json(msg=str(err))
+ except (IOError, ValueError, TypeError, ConnectionError, SSLValidationError, OSError) as err:
+ module.fail_json(msg=str(err))
+ except Exception as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py
new file mode 100644
index 000000000..d2b23c256
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py
@@ -0,0 +1,360 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_application_security_settings
+short_description: Configure the login security properties
+description: This module allows you to configure the login security properties on OpenManage Enterprise or OpenManage Enterprise Modular.
+version_added: "4.4.0"
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ restrict_allowed_ip_range:
+ description:
+ - Restrict to allow inbound connections only from the specified IP address range.
+ - This is mutually exclusive with I(fips_mode_enable).
+ - "C(NOTE) When I(restrict_allowed_ip_range) is configured on the appliance, any inbound connection to the appliance,
+ such as alert reception, firmware update, and network identities are blocked from the devices that are
+ outside the specified IP address range. However, any outbound connection from the appliance will work on all devices."
+ type: dict
+ suboptions:
+ enable_ip_range:
+ description: Allow connections based on the IP address range.
+ type: bool
+ required: true
+ ip_range:
+ description: "The IP address range in Classless Inter-Domain Routing (CIDR) format.
+ For example: 192.168.100.14/24 or 2001:db8::/24"
+ type: str
+ login_lockout_policy:
+ description:
+ - Locks the application after multiple unsuccessful login attempts.
+ - This is mutually exclusive with I(fips_mode_enable).
+ type: dict
+ suboptions:
+ by_user_name:
+ description: "Enable or disable lockout policy settings based on the user name. This restricts the number of
+ unsuccessful login attempts from a specific user for a specific time interval."
+ type: bool
+ by_ip_address:
+ description: "Enable or disable lockout policy settings based on the IP address. This restricts the number of
+ unsuccessful login attempts from a specific IP address for a specific time interval."
+ type: bool
+ lockout_fail_count:
+ description: "The number of unsuccessful login attempts that are allowed after which the appliance prevents log
+ in from the specific username or IP Address."
+ type: int
+ lockout_fail_window:
+ description: "Lockout fail window is the time in seconds within which the lockout fail count event must occur to
+ trigger the lockout penalty time. Enter the duration for which OpenManage Enterprise must display information
+ about a failed attempt."
+ type: int
+ lockout_penalty_time:
+ description: "The duration of time, in seconds, that login attempts from the specific user or IP address must
+ not be allowed."
+ type: int
+ job_wait:
+ description:
+ - Provides an option to wait for job completion.
+ type: bool
+ default: true
+ job_wait_timeout:
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(True).
+ type: int
+ default: 120
+ fips_mode_enable:
+ description:
+ - "The FIPS mode is intended to meet the requirements of FIPS 140-2 level 1. For more information refer to the FIPS
+ user guide"
+ - This is applicable for OpenManage Enterprise Modular only.
+ - This is mutually exclusive with I(restrict_allowed_ip_range) and I(login_lockout_policy).
+ - "C(WARNING) Enabling or Disabling this option resets your chassis to default settings. This may cause change in
+ IP settings and loss of network connectivity."
+ - "C(WARNING) The FIPS mode cannot be enabled on a lead chassis in a multi-chassis management configuration. To toggle
+ enable FIPS on a lead chassis, delete the chassis group, enable FIPS and recreate the group."
+ - "C(WARNING) For a Standalone or member chassis, enabling the FIPS mode deletes any fabrics created. This may cause
+ loss of network connectivity and data paths to the compute sleds."
+ type: bool
+author:
+ - Jagadeesh N V(@jagadeeshnv)
+requirements:
+ - "python >= 3.8.6"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Configure restricted allowed IP range
+ dellemc.openmanage.ome_application_security_settings:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ restrict_allowed_ip_range:
+ enable_ip_range: true
+ ip_range: 192.1.2.3/24
+
+- name: Configure login lockout policy
+ dellemc.openmanage.ome_application_security_settings:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ login_lockout_policy:
+ by_user_name: true
+ by_ip_address: true
+ lockout_fail_count: 3
+ lockout_fail_window: 30
+ lockout_penalty_time: 900
+
+- name: Configure restricted allowed IP range and login lockout policy with job wait time out of 60 seconds
+ dellemc.openmanage.ome_application_security_settings:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ restrict_allowed_ip_range:
+ enable_ip_range: true
+ ip_range: 192.1.2.3/24
+ login_lockout_policy:
+ by_user_name: true
+ by_ip_address: true
+ lockout_fail_count: 3
+ lockout_fail_window: 30
+ lockout_penalty_time: 900
+ job_wait_timeout: 60
+
+- name: Enable FIPS mode
+ dellemc.openmanage.ome_application_security_settings:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fips_mode_enable: yes
+'''
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of the login security configuration.
+ returned: always
+ type: str
+ sample: "Successfully applied the security settings."
+job_id:
+ description: Job ID of the security configuration task.
+ returned: When security configuration properties are provided
+ type: int
+ sample: 10123
+error_info:
+ type: dict
+ description: Details of the HTTP error.
+ returned: on HTTP error
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to process the request because the domain information cannot be retrieved.",
+ "MessageArgs": [],
+ "MessageId": "CGEN8007",
+ "RelatedProperties": [],
+ "Resolution": "Verify the status of the database and domain configuration, and then retry the
+ operation.",
+ "Severity": "Critical"
+ }
+ ],
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information."
+ }
+}
+'''
+
+GET_SETTINGS = "ApplicationService/Actions/ApplicationService.GetConfiguration"
+SET_SETTINGS = "ApplicationService/Actions/ApplicationService.ApplyConfiguration"
+FIPS_MODE = "ApplicationService/Security/SecurityConfiguration"
+JOB_EXEC_HISTORY = "JobService/Jobs({job_id})/ExecutionHistories"
+SEC_JOB_TRIGGERED = "Successfully triggered the job to apply security settings."
+SEC_JOB_COMPLETE = "Successfully applied the security settings."
+FIPS_TOGGLED = "Successfully {0} the FIPS mode."
+FIPS_CONN_RESET = "The network connection may have changed. Verify the connection and try again."
+NO_CHANGES_MSG = "No changes found to be applied."
+CHANGES_FOUND = "Changes found to be applied."
+SETTLING_TIME = 2
+JOB_POLL_INTERVAL = 3
+
+import json
+import time
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+
+def fips_mode_enable(module, rest_obj):
+ resp = rest_obj.invoke_request("GET", FIPS_MODE)
+ fips_payload = resp.json_data
+ curr_fips_mode = fips_payload.get("FipsMode")
+ if module.params.get("fips_mode_enable") is True:
+ fips_mode = "ON"
+ else:
+ fips_mode = "OFF"
+ if curr_fips_mode.lower() == fips_mode.lower():
+ module.exit_json(msg=NO_CHANGES_MSG)
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ payload = rest_obj.strip_substr_dict(fips_payload)
+ payload["FipsMode"] = fips_mode
+ rest_obj.invoke_request("PUT", FIPS_MODE, data=payload)
+ module.exit_json(msg=FIPS_TOGGLED.format("disabled" if fips_mode == "OFF" else "enabled"), changed=True)
+
+
+def get_security_payload(rest_obj):
+ resp = rest_obj.invoke_request("POST", GET_SETTINGS, data={})
+ full_set = resp.json_data
+ comps = full_set.get("SystemConfiguration", {}).get("Components", [{"Attributes": []}])
+ attribs = comps[0].get("Attributes")
+ attr_dict = dict(
+ [(sys.get('Name'), sys.get("Value")) for sys in attribs if "loginsecurity" in sys.get('Name').lower()])
+ return full_set, attr_dict
+
+
+def compare_merge(module, attr_dict):
+ val_map = {
+ "ip_range": "LoginSecurity.1#IPRangeAddr",
+ "enable_ip_range": "LoginSecurity.1#IPRangeEnable",
+ "by_ip_address": "LoginSecurity.1#LockoutByIPEnable",
+ "by_user_name": "LoginSecurity.1#LockoutByUsernameEnable",
+ "lockout_fail_count": "LoginSecurity.1#LockoutFailCount",
+ "lockout_fail_window": "LoginSecurity.1#LockoutFailCountTime",
+ "lockout_penalty_time": "LoginSecurity.1#LockoutPenaltyTime"
+ }
+ diff = 0
+ inp_dicts = ["restrict_allowed_ip_range", "login_lockout_policy"]
+ for d in inp_dicts:
+ inp_dict = module.params.get(d, {})
+ if inp_dict:
+ for k, v in inp_dict.items():
+ if v is not None:
+ if attr_dict[val_map[k]] != v:
+ attr_dict[val_map[k]] = v
+ diff = diff + 1
+ if attr_dict.get("LoginSecurity.1#IPRangeEnable") is False:
+ if attr_dict.get("LoginSecurity.1#IPRangeAddr") is not None:
+ attr_dict["LoginSecurity.1#IPRangeAddr"] = None
+ diff = diff - 1
+ if not diff:
+ module.exit_json(msg=NO_CHANGES_MSG)
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ return attr_dict
+
+
+def get_execution_details(rest_obj, job_id, job_message):
+ try:
+ resp = rest_obj.invoke_request('GET', JOB_EXEC_HISTORY.format(job_id=job_id))
+ ex_hist = resp.json_data.get('value')
+ # Sorting based on startTime and to get latest execution instance.
+ tmp_dict = dict((x["StartTime"], x["Id"]) for x in ex_hist)
+ sorted_dates = sorted(tmp_dict.keys())
+ ex_url = JOB_EXEC_HISTORY.format(job_id=job_id) + "({0})/ExecutionHistoryDetails".format(tmp_dict[sorted_dates[-1]])
+ resp = rest_obj.invoke_request('GET', ex_url)
+ ex_hist = resp.json_data.get('value')
+ message = job_message
+ if len(ex_hist) > 0:
+ message = ex_hist[0].get("Value")
+ except Exception:
+ message = job_message
+ message = message.replace('\n', '. ')
+ return message
+
+
+def exit_settings(module, rest_obj, job_id):
+ msg = SEC_JOB_TRIGGERED
+ time.sleep(SETTLING_TIME)
+ if module.params.get("job_wait"):
+ job_failed, job_message = rest_obj.job_tracking(
+ job_id=job_id, job_wait_sec=module.params["job_wait_timeout"], sleep_time=JOB_POLL_INTERVAL)
+ if job_failed is True:
+ job_message = get_execution_details(rest_obj, job_id, job_message)
+ module.exit_json(msg=job_message, failed=True, job_id=job_id)
+ msg = SEC_JOB_COMPLETE
+ module.exit_json(msg=msg, job_id=job_id, changed=True)
+
+
+def login_security_setting(module, rest_obj):
+ security_set, attr_dict = get_security_payload(rest_obj)
+ new_attr_dict = compare_merge(module, attr_dict)
+ comps = security_set.get("SystemConfiguration", {}).get("Components", [{"Attributes": []}])
+ comps[0]["Attributes"] = [{"Name": k, "Value": v} for k, v in new_attr_dict.items()]
+ resp = rest_obj.invoke_request("POST", SET_SETTINGS, data=security_set)
+ job_id = resp.json_data.get("JobId")
+ exit_settings(module, rest_obj, job_id)
+
+
+def main():
+ specs = {
+ "restrict_allowed_ip_range": {
+ "type": 'dict', "options": {
+ "enable_ip_range": {"type": 'bool', "required": True},
+ "ip_range": {"type": 'str'}
+ },
+ "required_if": [("enable_ip_range", True, ("ip_range",))]
+ },
+ "login_lockout_policy": {
+ "type": 'dict', "options": {
+ "by_user_name": {"type": 'bool'},
+ "by_ip_address": {"type": 'bool'},
+ "lockout_fail_count": {"type": 'int'},
+ "lockout_fail_window": {"type": 'int'},
+ "lockout_penalty_time": {"type": 'int'}
+ },
+ "required_one_of": [("by_user_name", "by_ip_address", "lockout_fail_count",
+ "lockout_fail_window", "lockout_penalty_time")]
+ },
+ "fips_mode_enable": {"type": 'bool'},
+ "job_wait": {"type": 'bool', "default": True},
+ "job_wait_timeout": {"type": 'int', "default": 120}
+ }
+ specs.update(ome_auth_params)
+
+ module = AnsibleModule(
+ argument_spec=specs,
+ mutually_exclusive=[("fips_mode_enable", "login_lockout_policy"),
+ ("fips_mode_enable", "restrict_allowed_ip_range")],
+ required_one_of=[("restrict_allowed_ip_range", "login_lockout_policy", "fips_mode_enable")],
+ supports_check_mode=True)
+
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ if module.params.get("fips_mode_enable") is not None:
+ fips_mode_enable(module, rest_obj)
+ else:
+ login_security_setting(module, rest_obj)
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_chassis_slots.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_chassis_slots.py
new file mode 100644
index 000000000..6b89fea16
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_chassis_slots.py
@@ -0,0 +1,611 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_chassis_slots
+short_description: Rename sled slots on OpenManage Enterprise Modular
+description: "This module allows to rename sled slots on OpenManage Enterprise Modular either using device id or device
+service tag or using chassis service tag and slot number."
+version_added: "3.6.0"
+author:
+ - Jagadeesh N V(@jagadeeshnv)
+extends_documentation_fragment:
+ - dellemc.openmanage.omem_auth_options
+options:
+ device_options:
+ type: list
+ elements: dict
+ description:
+ - The ID or service tag of the sled in the slot and the new name for the slot.
+ - I(device_options) is mutually exclusive with I(slot_options).
+ suboptions:
+ device_id:
+ type: int
+ description:
+ - Device ID of the sled in the slot.
+ - This is mutually exclusive with I(device_service_tag).
+ device_service_tag:
+ type: str
+ description:
+ - Service tag of the sled in the slot.
+ - This is mutually exclusive with I(device_id).
+ slot_name:
+ type: str
+ description: Provide name for the slot.
+ required: True
+ slot_options:
+ type: list
+ elements: dict
+ description:
+ - The service tag of the chassis, slot number of the slot to be renamed, and the new name for the slot.
+ - I(slot_options) is mutually exclusive with I(device_options).
+ suboptions:
+ chassis_service_tag:
+ type: str
+ description: Service tag of the chassis.
+ required: True
+ slots:
+ type: list
+ elements: dict
+ description:
+ - The slot number and the new name for the slot.
+ required: true
+ suboptions:
+ slot_number:
+ type: int
+ description: The slot number of the slot to be renamed.
+ required: True
+ slot_name:
+ type: str
+ description: Provide name for the slot.
+ required: True
+requirements:
+ - "python >= 3.8.6"
+notes:
+ - "This module initiates the refresh inventory task. It may take a minute for new names to be reflected.
+ If the task exceeds 300 seconds to refresh, the task times out."
+ - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Rename the slots in multiple chassis using slot number and chassis service tag
+ dellemc.openmanage.ome_chassis_slots:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ slot_options:
+ - chassis_service_tag: ABC1234
+ slots:
+ - slot_number: 1
+ slot_name: sled_name_1
+ - slot_number: 2
+ slot_name: sled_name_2
+ - chassis_service_tag: ABC1235
+ slots:
+ - slot_number: 1
+ slot_name: sled_name_1
+ - slot_number: 2
+ slot_name: sled_name_2
+
+- name: Rename single slot name of the sled using sled ID
+ dellemc.openmanage.ome_chassis_slots:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_options:
+ - device_id: 10054
+ slot_name: slot_device_name_1
+
+- name: Rename single slot name of the sled using sled service tag
+ dellemc.openmanage.ome_chassis_slots:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_options:
+ - device_service_tag: ABC1234
+ slot_name: service_tag_slot
+
+- name: Rename multiple slot names of the devices
+ dellemc.openmanage.ome_chassis_slots:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_options:
+ - device_id: 10054
+ slot_name: sled_name_1
+ - device_service_tag: ABC1234
+ slot_name: sled_name_2
+ - device_id: 10055
+ slot_name: sled_name_3
+ - device_service_tag: PQR1234
+ slot_name: sled_name_4
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall status of the slot rename operation.
+ returned: always
+ sample: "Successfully renamed the slot(s)."
+slot_info:
+ description:
+ - Information of the slots that are renamed successfully.
+ - The C(DeviceServiceTag) and C(DeviceId) options are available only if I(device_options) is used.
+ - C(NOTE) Only the slots which were renamed are listed.
+ type: list
+ elements: dict
+ returned: if at least one slot renamed
+ sample: [
+ {
+ "ChassisId": 10053,
+ "ChassisServiceTag": "ABCD123",
+ "DeviceName": "",
+ "DeviceType": 1000,
+ "JobId": 15746,
+ "SlotId": "10072",
+ "SlotName": "slot_op2",
+ "SlotNumber": "6",
+ "SlotType": 2000
+ },
+ {
+ "ChassisId": 10053,
+ "ChassisName": "MX-ABCD123",
+ "ChassisServiceTag": "ABCD123",
+ "DeviceType": "3000",
+ "JobId": 15747,
+ "SlotId": "10070",
+ "SlotName": "slot_op2",
+ "SlotNumber": "4",
+ "SlotType": "2000"
+ },
+ {
+ "ChassisId": "10053",
+ "ChassisName": "MX-PQRS123",
+ "ChassisServiceTag": "PQRS123",
+ "DeviceId": "10054",
+ "DeviceServiceTag": "XYZ5678",
+ "DeviceType": "1000",
+ "JobId": 15761,
+ "SlotId": "10067",
+ "SlotName": "a1",
+ "SlotNumber": "1",
+ "SlotType": "2000"
+ }
+ ]
+rename_failed_slots:
+ description:
+ - Information of the valid slots that are not renamed.
+ - C(JobStatus) is shown if rename job fails.
+ - C(NOTE) Only slots which were not renamed are listed.
+ type: list
+ elements: dict
+ returned: if at least one slot renaming fails
+ sample: [
+ {
+ "ChassisId": "12345",
+ "ChassisName": "MX-ABCD123",
+ "ChassisServiceTag": "ABCD123",
+ "DeviceType": "4000",
+ "JobId": 1234,
+ "JobStatus": "Aborted",
+ "SlotId": "10061",
+ "SlotName": "c2",
+ "SlotNumber": "1",
+ "SlotType": "4000"
+ },
+ {
+ "ChassisId": "10053",
+ "ChassisName": "MX-PQRS123",
+ "ChassisServiceTag": "PQRS123",
+ "DeviceType": "1000",
+ "JobId": 0,
+ "JobStatus": "HTTP Error 400: Bad Request",
+ "SlotId": "10069",
+ "SlotName": "b2",
+ "SlotNumber": "3",
+ "SlotType": "2000"
+ }
+ ]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1014",
+ "RelatedProperties": [],
+ "Message": "Unable to complete the operation because an invalid value is entered for the property
+ Invalid json type: STRING for Edm.Int64 property: Id .",
+ "MessageArgs": [
+ "Invalid json type: STRING for Edm.Int64 property: Id"
+ ],
+ "Severity": "Critical",
+ "Resolution": "Enter a valid value for the property and retry the operation. For more information about
+ valid values, see the OpenManage Enterprise-Modular User's Guide available on the support site."
+ }
+ ]
+ }
+}
+"""
+
+import json
+import time
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.common.dict_transformations import recursive_diff
+
+DEVICE_URI = "DeviceService/Devices"
+JOB_URI = "JobService/Jobs"
+DEVICE_REPEATED = "Duplicate device entry found for devices with identifiers {0}."
+INVALID_SLOT_DEVICE = "Unable to rename one or more slots because either the specified device is invalid or slots " \
+ "cannot be configured. The devices for which the slots cannot be renamed are: {0}."
+JOBS_TRIG_FAIL = "Unable to initiate the slot name rename jobs."
+SUCCESS_MSG = "Successfully renamed the slot(s)."
+SUCCESS_REFRESH_MSG = "The rename slot job(s) completed successfully. " \
+ "For changes to reflect, refresh the inventory task manually."
+FAILED_MSG = "Failed to rename {0} of {1} slot names."
+NO_CHANGES_MSG = "No changes found to be applied."
+CHANGES_FOUND = "Changes found to be applied."
+SLOT_JOB_DESC = "The rename slot task initiated from OpenManage Ansible Module collections"
+REFRESH_JOB_DESC = "The refresh inventory task initiated from OpenManage Ansible Module collections"
+CHASSIS_TAG_INVALID = "Provided chassis {0} is invalid."
+INVALID_SLOT_NUMBERS = "Unable to rename one or more slots because the slot number(s) are invalid: {0}."
+SLOT_NUM_DUP = "Slot numbers are repeated for chassis {0}."
+CHASSIS_REPEATED = "Duplicate chassis entry found for chassis with service tags {0}."
+SETTLING_TIME = 2 # time gap between two consecutive job triggers
+JOB_TIMEOUT = 300
+JOB_INTERVAL = 5
+
+
def get_device_slot_config(module, rest_obj):
    """Build the slot rename work list from the ``device_options`` input.

    Resolves each requested device (by id or service tag) to its current
    SlotConfiguration, validates duplicates and non-slotted devices, and
    returns only the slots whose name actually needs to change, keyed by
    device identifier.  Exits the module early for no-change / check mode.
    """
    # ids: requested device-id (as str) -> new slot name; tags: service tag -> new name.
    ids, tags = {}, {}
    dvc_list = []
    for dvc in module.params.get('device_options'):
        sn = dvc.get('slot_name')
        id = dvc.get('device_id')  # NOTE: shadows the builtin `id` (kept as-is)
        st = dvc.get('device_service_tag')
        if id:
            ids[str(id)] = sn
            dvc_list.append(str(id))
        else:
            tags[st] = sn
            dvc_list.append(st)
    # Any identifier listed more than once is a duplicate entry.
    duplicate = [x for i, x in enumerate(dvc_list) if i != dvc_list.index(x)]
    if duplicate:
        module.fail_json(msg=DEVICE_REPEATED.format((';'.join(set(duplicate)))))
    resp = rest_obj.get_all_items_with_pagination(DEVICE_URI)
    devices = resp.get('value')
    all_dvcs = {}            # service tag -> SlotConfiguration for every device seen
    invalid_slots = set()    # requested devices without a usable slot
    ident_map, name_map = {}, {}  # requested identifier -> service tag / current slot name
    for dvc in devices:
        if not ids and not tags:
            break
        id = str(dvc.get('Id'))
        tag = dvc.get('Identifier')
        slot_cfg = dvc.get('SlotConfiguration')
        all_dvcs[tag] = slot_cfg
        if id in ids:
            # Devices with no SlotNumber cannot have their slot renamed.
            if not slot_cfg or not slot_cfg.get("SlotNumber"):
                invalid_slots.add(id)
            else:
                ident_map[id] = tag
                name_map[id] = slot_cfg['SlotName']
                slot_cfg['new_name'] = ids[id]
                slot_cfg['DeviceServiceTag'] = tag
                slot_cfg['DeviceId'] = id
        if tag in tags:
            if not slot_cfg or not slot_cfg.get("SlotNumber"):
                invalid_slots.add(tag)
            else:
                ident_map[tag] = tag
                name_map[tag] = slot_cfg['SlotName']
                slot_cfg['new_name'] = tags[tag]
                slot_cfg['DeviceServiceTag'] = tag
                slot_cfg['DeviceId'] = id
    # Catch the same device requested once by id and once by service tag.
    idf_list = list(ident_map.values())
    duplicate = [x for i, x in enumerate(idf_list) if i != idf_list.index(x)]
    if duplicate:
        module.fail_json(msg=DEVICE_REPEATED.format((';'.join(set(duplicate)))))
    # Identifiers never matched against the inventory are invalid as well.
    invalid_slots.update(set(ids.keys()) - set(ident_map.keys()))
    invalid_slots.update(set(tags.keys()) - set(ident_map.keys()))
    if invalid_slots:
        module.fail_json(msg=INVALID_SLOT_DEVICE.format(';'.join(invalid_slots)))
    # Diff requested names against current names; keep only real changes.
    slot_dict_diff = {}
    id_diff = recursive_diff(ids, name_map)
    if id_diff and id_diff[0]:
        diff = dict([(int(k), all_dvcs[ident_map[k]]) for k, v in (id_diff[0]).items()])
        slot_dict_diff.update(diff)
    tag_diff = recursive_diff(tags, name_map)
    if tag_diff and tag_diff[0]:
        diff = dict([(ident_map[k], all_dvcs[k]) for k, v in (tag_diff[0]).items()])
        slot_dict_diff.update(diff)
    if not slot_dict_diff:
        module.exit_json(msg=NO_CHANGES_MSG)
    if module.check_mode:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    return slot_dict_diff
+
+
def start_slot_name_jobs(rest_obj, slot_data):
    """Submit one rename job per slot in ``slot_data``.

    Each successfully submitted slot gets its ``JobId`` recorded.  Slots whose
    submission raised an HTTPError are removed from ``slot_data`` and returned
    with ``JobId`` 0 and the error text in ``JobStatus``.

    :param rest_obj: OME session object used to submit jobs.
    :param slot_data: dict of identifier -> slot record; mutated in place.
    :return: dict of identifier -> slot record for failed submissions.
    """
    slot_type = {'2000': "Sled Slot", '4000': "IO Module Slot", '2100': "Storage Sled"}
    failed_jobs = {}
    job_description = SLOT_JOB_DESC
    job_type = {"Id": 3, "Name": "DeviceAction_Task"}
    for key, slot in slot_data.items():
        job_params, target_param = [{"Key": "operationName", "Value": "UPDATE_SLOT_DATA"}], []
        num = slot.get('SlotNumber')
        type_id = str(slot.get('SlotType'))
        job_name = "Rename {0} {1}".format(slot_type.get(type_id, 'Slot'), num)
        target_param.append({"Id": int(slot.get('ChassisId')), "Data": "",
                             "TargetType": {"Id": 1000, "Name": "DEVICE"}})
        # Payload format expected by the appliance: "<number>|<type>|<new name>".
        slot_config = "{0}|{1}|{2}".format(num, type_id, slot.get('new_name'))
        job_params.append({'Key': 'slotConfig', 'Value': slot_config})
        try:
            job_resp = rest_obj.job_submission(job_name, job_description, target_param,
                                               job_params, job_type)
            slot['JobId'] = job_resp.json_data.get('Id', 0)
            time.sleep(SETTLING_TIME)  # pace consecutive job submissions
        except HTTPError as err:
            slot['JobId'] = 0
            slot['JobStatus'] = str(err)
            failed_jobs[key] = slot
    # Drop failed submissions from the pending work list.  Explicit loop
    # instead of the original side-effect list comprehension.
    for key in failed_jobs:
        slot_data.pop(key)
    return failed_jobs
+
+
def get_job_states(module, rest_obj, slot_data):
    """Poll the submitted rename jobs until they finish or time out.

    Slots whose job completed successfully get ``SlotName`` updated from
    ``new_name``; slots whose job ended in any other state (or never finished
    within JOB_TIMEOUT) are removed from ``slot_data`` and returned as the
    failed-jobs dict.
    """
    # job id -> slot_data key, for jobs that were actually submitted (JobId truthy).
    job_dict = dict([(slot['JobId'], k) for k, slot in slot_data.items() if slot['JobId']])
    query_params = {"$filter": "JobType/Id eq 3"}  # optimize this
    count = JOB_TIMEOUT // SETTLING_TIME
    job_incomplete = [2050, 2030, 2040, 2080]  # Running, Queued, Starting, New
    while count > 0 and job_dict:
        try:
            job_resp = rest_obj.invoke_request("GET", JOB_URI, query_param=query_params)
            jobs = job_resp.json_data.get('value')
        except HTTPError:
            count = count - 50  # 3 times retry for HTTP error
            time.sleep(SETTLING_TIME)
            continue
        job_over = []
        for job in jobs:
            id = job.get('Id')  # NOTE: shadows the builtin `id` (kept as-is)
            if id in job_dict:
                lrs = job.get('LastRunStatus')
                slot = slot_data[job_dict[id]]
                if lrs.get('Id') in job_incomplete:  # Running, not failed, not completed state
                    job_over.append(False)
                elif lrs.get('Id') == 2060:
                    # 2060: completed successfully; record the applied name.
                    job_over.append(True)
                    slot['SlotName'] = slot.pop('new_name')
                    job_dict.pop(id)
                else:
                    slot['JobStatus'] = lrs.get('Name')
                    job_over.append(True)  # Failed states - job not running
        # NOTE(review): if none of the tracked job ids appear in this response,
        # job_over stays empty and all([]) is True, ending the loop early even
        # though jobs may still be pending — confirm this is intended.
        if all(job_over) or not job_dict:
            break
        count = count - 1
        time.sleep(SETTLING_TIME)
    # Anything still tracked did not reach the success state: report as failed.
    failed_jobs = dict([(k, slot_data.pop(k)) for k in job_dict.values()])
    return failed_jobs
+
+
def trigger_refresh_inventory(rest_obj, slot_data):
    """Submit one inventory-refresh job per chassis touched by the renames.

    :param rest_obj: OME session object used to submit jobs.
    :param slot_data: dict of renamed slot records (source of chassis ids/tags).
    :return: list of submitted refresh job IDs.
    """
    # De-duplicate chassis across all renamed slots: chassis id -> service tag.
    chassis_map = {slot['ChassisId']: slot['ChassisServiceTag'] for slot in slot_data.values()}
    job_ids = []
    for chassis_id, svc_tag in chassis_map.items():
        targets = [{"Id": int(chassis_id), "Data": "''", "TargetType": {"Id": 1000, "Name": "DEVICE"}}]
        params = [{"Key": "operationName", "Value": "EC_SLOT_DEVICE_INVENTORY_REFRESH"}]
        response = rest_obj.job_submission(
            "Refresh Inventory Chassis {0}".format(svc_tag),
            REFRESH_JOB_DESC, targets, params,
            {"Id": 8, "Name": "Inventory_Task"})
        job_ids.append(int(response.json_data.get('Id')))
        time.sleep(SETTLING_TIME)  # pace consecutive job submissions
    return job_ids
+
+
def trigger_all_inventory_task(rest_obj):
    """Kick off the default inventory refresh on the built-in All-Devices group.

    :param rest_obj: OME session object used to submit the job.
    :return: the submitted job's ID.
    """
    response = rest_obj.job_submission(
        "Refresh Inventory All Devices",
        REFRESH_JOB_DESC,
        [{"Id": 500, "Data": "All-Devices", "TargetType": {"Id": 6000, "Name": "GROUP"}}],
        [{"Key": "defaultInventoryTask", "Value": "TRUE"}],
        {"Id": 8, "Name": "Inventory_Task"})
    return response.json_data.get('Id')
+
+
def get_formatted_slotlist(slot_dict):
    """Strip internal bookkeeping keys from each slot record for output.

    Only keys whose lower-cased name starts with 'slot', 'job', 'chassis' or
    'device' are kept (SlotName, JobId, ChassisId, DeviceServiceTag, ...);
    everything else — e.g. the transient 'new_name' key — is removed.
    The slot dicts are pruned in place and returned as a list.

    :param slot_dict: dict of identifier -> slot record.
    :return: list of the pruned slot records.
    """
    keep_prefixes = ('slot', 'job', 'chassis', 'device')
    slot_list = list(slot_dict.values())
    for slot in slot_list:
        # Snapshot the keys with list() — cheaper than the original full
        # dict copy — because we mutate the dict while pruning.
        for key in list(slot):
            if not str(key).lower().startswith(keep_prefixes):
                slot.pop(key)
    return slot_list
+
+
def exit_slot_config(module, rest_obj, failed_jobs, invalid_jobs, slot_data):
    """Report the final module result after the rename jobs have run.

    Fails the module if any slot could not be renamed; otherwise triggers
    inventory refresh tasks so the new names become visible, then exits with
    the list of renamed slots.
    """
    failed_jobs.update(invalid_jobs)
    if failed_jobs:
        f = len(failed_jobs)   # slots that failed to rename
        s = len(slot_data)     # slots renamed successfully
        slot_info = get_formatted_slotlist(slot_data)
        failed_jobs_list = get_formatted_slotlist(failed_jobs)
        module.fail_json(msg=FAILED_MSG.format(f, s + f),
                         slot_info=slot_info, rename_failed_slots=failed_jobs_list)
    if slot_data:
        job_failed_list = []
        try:
            # Refresh each touched chassis first, then the whole inventory.
            rfrsh_job_list = trigger_refresh_inventory(rest_obj, slot_data)
            for job in rfrsh_job_list:
                job_failed, job_message = rest_obj.job_tracking(
                    job, job_wait_sec=JOB_TIMEOUT, sleep_time=JOB_INTERVAL)
                job_failed_list.append(job_failed)
            all_dv_rfrsh = trigger_all_inventory_task(rest_obj)
            job_failed, job_message = rest_obj.job_tracking(
                all_dv_rfrsh, job_wait_sec=JOB_TIMEOUT, sleep_time=JOB_INTERVAL)
            job_failed_list.append(job_failed)
        except Exception:  # Refresh is secondary task hence not failing module
            job_failed_list = [True]
        if any(job_failed_list) is True:
            # Rename succeeded but refresh did not: ask the user to refresh manually.
            slot_info = get_formatted_slotlist(slot_data)
            failed_jobs_list = get_formatted_slotlist(failed_jobs)
            module.exit_json(changed=True, msg=SUCCESS_REFRESH_MSG, slot_info=slot_info,
                             rename_failed_slots=failed_jobs_list)
    slot_info = get_formatted_slotlist(slot_data)
    module.exit_json(changed=True, msg=SUCCESS_MSG, slot_info=slot_info,
                     rename_failed_slots=list(failed_jobs.values()))
+
+
def get_device_type(rest_obj, type):
    """Return the device list filtered by OME device type.

    Callers in this file use 2000 (chassis) and 3000 (storage).  The second
    parameter is named ``type`` (shadowing the builtin) to keep the existing
    call signature; only the local ``filter`` shadowing is cleaned up here.

    :param rest_obj: OME session object.
    :param type: OME device-type identifier.
    :return: the JSON payload of the device query.
    """
    query = {"$filter": "Type eq {0}".format(str(type))}
    resp = rest_obj.invoke_request("GET", DEVICE_URI, query_param=query)
    return resp.json_data
+
+
def get_slot_data(module, rest_obj, ch_slots, chass_id):
    """Collect one chassis's renameable slots and diff them against the input.

    Reads the chassis blade slots (plus storage-sled slot configs when fewer
    than 8 blade slots are reported), validates the requested slot numbers,
    and returns a dict of '<chassis_tag>_<slot_number>' -> slot record for the
    slots whose current name differs from the requested one.
    """
    uri = DEVICE_URI + "({0})/DeviceBladeSlots".format(chass_id)
    chsvc_tag = ch_slots.get('chassis_service_tag')
    resp = rest_obj.invoke_request("GET", uri)
    blade_slots = resp.json_data.get('value')
    if len(blade_slots) < 8:
        # Storage type 3000: storage sleds are not listed under
        # DeviceBladeSlots, so pull their slot configs from the device list.
        resp = get_device_type(rest_obj, 3000)
        storage = resp.get('value')
        for stx in storage:
            if stx.get('ChassisServiceTag') == chsvc_tag:
                blade_slots.append(stx.get('SlotConfiguration'))
    blade_dict = {}
    for slot in blade_slots:
        # Annotate each slot record with its chassis for later reporting.
        slot["ChassisId"] = chass_id
        slot["ChassisServiceTag"] = chsvc_tag
        if slot.get('Id'):
            slot["SlotId"] = str(slot.get('Id'))
        blade_dict[slot['SlotNumber']] = slot
        rest_obj.strip_substr_dict(slot)
    inp_slots = ch_slots.get('slots')
    existing_dict = dict([(slot['SlotNumber'], slot['SlotName']) for slot in blade_slots])
    input_dict = dict([(str(slot['slot_number']), slot['slot_name']) for slot in inp_slots])
    # Requested slot numbers must exist on this chassis.
    invalid_slot_number = set(input_dict.keys()) - set(existing_dict.keys())
    if invalid_slot_number:
        module.fail_json(msg=INVALID_SLOT_NUMBERS.format(';'.join(invalid_slot_number)))
    # A shrunken dict means the same slot number was supplied more than once.
    if len(input_dict) < len(inp_slots):
        module.fail_json(msg=SLOT_NUM_DUP.format(chsvc_tag))
    slot_dict_diff = {}
    slot_diff = recursive_diff(input_dict, existing_dict)
    if slot_diff and slot_diff[0]:
        diff = {}
        for k, v in (slot_diff[0]).items():
            blade_dict[k]['new_name'] = input_dict.get(k)
            diff["{0}_{1}".format(chsvc_tag, k)] = blade_dict[k]
        slot_dict_diff.update(diff)
    return slot_dict_diff
+
+
def slot_number_config(module, rest_obj):
    """Build the slot rename work list from the ``slot_options`` input.

    Validates that the supplied chassis service tags exist and are unique,
    gathers the per-chassis slot diffs, and exits early for the no-change and
    check-mode cases.  Returns the combined dict of slots to rename.
    """
    chassis_options = module.params.get("slot_options")
    # Type 2000 devices are the chassis; map service tag -> device id.
    chassis_resp = get_device_type(rest_obj, 2000)
    tag_to_id = dict([(item['Identifier'], item['Id']) for item in chassis_resp.get('value')])
    requested_tags = list(option.get('chassis_service_tag') for option in chassis_options)
    duplicate = [x for i, x in enumerate(requested_tags) if i != requested_tags.index(x)]
    if duplicate:
        module.fail_json(msg=CHASSIS_REPEATED.format((';'.join(set(duplicate)))))
    slot_data = {}
    for option in chassis_options:
        service_tag = option.get('chassis_service_tag')
        if service_tag not in tag_to_id.keys():
            module.fail_json(msg=CHASSIS_TAG_INVALID.format(service_tag))
        slot_data.update(get_slot_data(module, rest_obj, option, tag_to_id[service_tag]))
    if not slot_data:
        module.exit_json(msg=NO_CHANGES_MSG)
    if module.check_mode:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    return slot_data
+
+
def main():
    """Module entry point: validate input, rename the slots, report results."""
    specs = {
        "device_options": {"type": 'list', "elements": 'dict',
                           "options": {
                               "slot_name": {"required": True, 'type': 'str'},
                               "device_id": {"type": 'int'},
                               "device_service_tag": {"type": 'str'}
                           },
                           # Each entry identifies the sled by exactly one of
                           # device id or service tag.
                           "mutually_exclusive": [('device_id', 'device_service_tag')],
                           "required_one_of": [('device_id', 'device_service_tag')]
                           },
        "slot_options": {"type": 'list', "elements": 'dict',
                         "options": {
                             "chassis_service_tag": {"required": True, 'type': 'str'},
                             "slots": {"required": True, "type": 'list', "elements": 'dict',
                                       "options": {
                                           "slot_number": {"required": True, 'type': 'int'},
                                           "slot_name": {"required": True, "type": 'str'}
                                       },
                                       },
                             },
                         },
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_one_of=[('slot_options', 'device_options')],
        mutually_exclusive=[('slot_options', 'device_options')],
        supports_check_mode=True
    )

    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            # The work list comes either from chassis/slot numbers or from
            # device ids / service tags — never both (mutually exclusive).
            if module.params.get("slot_options"):
                slot_data = slot_number_config(module, rest_obj)
            else:
                slot_data = get_device_slot_config(module, rest_obj)
            invalid_jobs = start_slot_name_jobs(rest_obj, slot_data)
            failed_jobs = {}
            if slot_data:
                failed_jobs = get_job_states(module, rest_obj, slot_data)
            else:
                # Every job submission failed; nothing left to track.
                module.fail_json(msg=JOBS_TRIG_FAIL, rename_failed_slots=list(invalid_jobs.values()))
            exit_slot_config(module, rest_obj, failed_jobs, invalid_jobs, slot_data)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # An unreachable appliance is reported, not failed, so plays can retry.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
        module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_baseline.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_baseline.py
new file mode 100644
index 000000000..5cac7352d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_baseline.py
@@ -0,0 +1,842 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_configuration_compliance_baseline
+short_description: Create, modify, and delete a configuration compliance baseline and remediate non-compliant devices on
+ OpenManage Enterprise
+version_added: "3.2.0"
+description: "This module allows to create, modify, and delete a configuration compliance baseline on OpenManage Enterprise.
+ This module also allows to remediate devices that are non-compliant with the baseline by changing the attributes of devices
+ to match with the associated baseline attributes."
+extends_documentation_fragment:
+ - dellemc.openmanage.oment_auth_options
+options:
+ command:
+ description:
+ - "C(create) creates a configuration baseline from an existing compliance template.C(create) supports
+ C(check_mode) or idempotency checking for only I(names)."
+ - "C(modify) modifies an existing baseline.Only I(names), I(description), I(device_ids), I(device_service_tags),
+ and I(device_group_names) can be modified"
+ - "I(WARNING) When a baseline is modified, the provided I(device_ids), I(device_group_names), and I(device_service_tags)
+ replaces the devices previously present in the baseline."
+ - C(delete) deletes the list of configuration compliance baselines based on the baseline name. Invalid baseline
+ names are ignored.
+ - "C(remediate) remediates devices that are non-compliant with the baseline by changing the attributes of devices
+ to match with the associated baseline attributes."
+ - "C(remediate) is performed on all the non-compliant devices if either I(device_ids), or I(device_service_tags)
+ is not provided."
+ choices: [create, modify, delete, remediate]
+ default: create
+ type: str
+ names:
+ description:
+ - Name(s) of the configuration compliance baseline.
+ - This option is applicable when I(command) is C(create), C(modify), or C(delete).
+ - Provide the list of configuration compliance baselines names that are supported when I(command) is C(delete).
+ required: true
+ type: list
+ elements: str
+ new_name:
+ description:
+ - New name of the compliance baseline to be modified.
+ - This option is applicable when I(command) is C(modify).
+ type: str
+ template_name:
+ description:
+ - Name of the compliance template for creating the compliance baseline(s).
+ - Name of the deployment template to be used for creating a compliance baseline.
+ - This option is applicable when I(command) is C(create) and is mutually exclusive with I(template_id).
+ type: str
+ template_id:
+ description:
+ - ID of the deployment template to be used for creating a compliance baseline.
+ - This option is applicable when I(command) is C(create) and is mutually exclusive with I(template_name).
+ type: int
+ device_ids:
+ description:
+ - IDs of the target devices.
+ - This option is applicable when I(command) is C(create), C(modify), or C(remediate), and is mutually exclusive
+ with I(device_service_tag) and I(device_group_names).
+ type: list
+ elements: int
+ device_service_tags:
+ description:
+ - Service tag of the target device.
+ - This option is applicable when I(command) is C(create), C(modify), or C(remediate) and is mutually exclusive with
+ I(device_ids) and I(device_group_names).
+ type: list
+ elements: str
+ device_group_names:
+ description:
+ - Name of the target device group.
+ - This option is applicable when I(command) is C(create), or C(modify)
+ and is mutually exclusive with I(device_ids) and I(device_service_tag).
+ type: list
+ elements: str
+ description:
+ description:
+ - Description of the compliance baseline.
+ - This option is applicable when I(command) is C(create), or C(modify).
+ type: str
+ job_wait:
+ description:
+ - Provides the option to wait for job completion.
+ - This option is applicable when I(command) is C(create), C(modify), or C(remediate).
+ type: bool
+ default: true
+ job_wait_timeout:
+ description:
+ - The maximum wait time of I(job_wait) in seconds.The job will only be tracked for this duration.
+ - This option is applicable when I(job_wait) is C(True).
+ type: int
+ default: 10800
+requirements:
+ - "python >= 3.8.6"
+author: "Sajna Shetty(@Sajna-Shetty)"
+notes:
+ - This module supports C(check_mode).
+ - Ensure that the devices have the required licenses to perform the baseline compliance operations.
+'''
+
+EXAMPLES = r'''
+---
+- name: Create a configuration compliance baseline using device IDs
+ dellemc.openmanage.ome_configuration_compliance_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ names: "baseline1"
+ template_name: "template1"
+ description: "description of baseline"
+ device_ids:
+ - 1111
+ - 2222
+
+- name: Create a configuration compliance baseline using device service tags
+ dellemc.openmanage.ome_configuration_compliance_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ names: "baseline1"
+ template_id: 1234
+ description: "description of baseline"
+ device_service_tags:
+ - "SVCTAG1"
+ - "SVCTAG2"
+
+- name: Create a configuration compliance baseline using group names
+ dellemc.openmanage.ome_configuration_compliance_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ names: "baseline2"
+ template_id: 2
+ job_wait_timeout: 1000
+ description: "description of baseline"
+ device_group_names:
+ - "Group1"
+ - "Group2"
+
+- name: Delete the configuration compliance baselines
+ dellemc.openmanage.ome_configuration_compliance_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: delete
+ names:
+ - baseline1
+ - baseline2
+
+- name: Modify a configuration compliance baseline using group names
+ dellemc.openmanage.ome_configuration_compliance_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: modify
+ names: "baseline1"
+ new_name: "baseline_update"
+ template_name: "template2"
+ description: "new description of baseline"
+ job_wait_timeout: 1000
+ device_group_names:
+ - Group1
+
+- name: Remediate specific non-compliant devices to a configuration compliance baseline using device IDs
+ dellemc.openmanage.ome_configuration_compliance_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "remediate"
+ names: "baseline1"
+ device_ids:
+ - 1111
+
+- name: Remediate specific non-compliant devices to a configuration compliance baseline using device service tags
+ dellemc.openmanage.ome_configuration_compliance_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "remediate"
+ names: "baseline1"
+ device_service_tags:
+ - "SVCTAG1"
+ - "SVCTAG2"
+
+- name: Remediate all the non-compliant devices to a configuration compliance baseline
+ dellemc.openmanage.ome_configuration_compliance_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "remediate"
+ names: "baseline1"
+'''
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of the configuration compliance baseline operation.
+ returned: always
+ type: str
+ sample: "Successfully created the configuration compliance baseline."
+incompatible_devices:
+ description: Details of the devices which cannot be used to perform baseline compliance operations
+ returned: when I(device_service_tags) or I(device_ids) contains incompatible devices for C(create) or C(modify)
+ type: list
+ sample: [1234, 5678]
+compliance_status:
+ description: Status of compliance baseline operation.
+ returned: when I(command) is C(create) or C(modify)
+ type: dict
+ sample: {
+ "Id": 13,
+ "Name": "baseline1",
+ "Description": null,
+ "TemplateId": 102,
+ "TemplateName": "one",
+ "TemplateType": 2,
+ "TaskId": 26584,
+ "PercentageComplete": "100",
+ "TaskStatus": 2070,
+ "LastRun": "2021-02-27 13:15:13.751",
+ "BaselineTargets": [
+ {
+ "Id": 1111,
+ "Type": {
+ "Id": 1000,
+ "Name": "DEVICE"
+ }
+ }
+ ],
+ "ConfigComplianceSummary": {
+ "ComplianceStatus": "OK",
+ "NumberOfCritical": 0,
+ "NumberOfWarning": 0,
+ "NumberOfNormal": 0,
+ "NumberOfIncomplete": 0
+ }
+ }
+job_id:
+ description:
+ - Task ID created when I(command) is C(remediate).
+ returned: when I(command) is C(remediate)
+ type: int
+ sample: 14123
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+import time
+import re
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
# OME REST API routes used by this module.
COMPLIANCE_BASELINE = "TemplateService/Baselines"
REMEDIATE_BASELINE = "TemplateService/Actions/TemplateService.Remediate"
DELETE_COMPLIANCE_BASELINE = "TemplateService/Actions/TemplateService.RemoveBaseline"
# NOTE(review): unlike the other routes this one carries an "api/" prefix and is
# not referenced anywhere in this module (modify builds its URI from
# COMPLIANCE_BASELINE instead) -- confirm whether it is dead code.
MODIFY_COMPLIANCE_BASELINE = "api/TemplateService/Baselines({baseline_id})"
TEMPLATE_VIEW = "TemplateService/Templates"
DEVICE_VIEW = "DeviceService/Devices"
GROUP_VIEW = "GroupService/Groups"
OME_INFO = "ApplicationService/Info"
CONFIG_COMPLIANCE_URI = "TemplateService/Baselines({0})/DeviceConfigComplianceReports"
# User-facing message templates (kept verbatim; they are part of the module's output contract).
INVALID_DEVICES = "{identifier} details are not available."
TEMPLATE_ID_ERROR_MSG = "Template with ID '{template_id}' not found."
TEMPLATE_NAME_ERROR_MSG = "Template '{template_name}' not found."
NAMES_ERROR = "Only delete operations accept multiple baseline names. All the other operations accept only a single " \
              "baseline name."
BASELINE_CHECK_MODE_CHANGE_MSG = "Baseline '{name}' already exists."
CHECK_MODE_CHANGES_MSG = "Changes found to be applied."
CHECK_MODE_NO_CHANGES_MSG = "No changes found to be applied."
BASELINE_CHECK_MODE_NOCHANGE_MSG = "Baseline '{name}' does not exist."
CREATE_MSG = "Successfully created the configuration compliance baseline."
DELETE_MSG = "Successfully deleted the configuration compliance baseline(s)."
MODIFY_MSG = "Successfully modified the configuration compliance baseline."
TASK_PROGRESS_MSG = "The initiated task for the configuration compliance baseline is in progress."
INVALID_IDENTIFIER = "Target with {identifier} {invalid_val} not found."
IDEMPOTENCY_MSG = "The specified configuration compliance baseline details are the same as the existing settings."
# NOTE(review): "complaint" below looks like a typo for "compliant"; fixing it
# would change a user-facing message, so it is only flagged here.
INVALID_COMPLIANCE_IDENTIFIER = "Unable to complete the operation because the entered target {0} {1}" \
                                " is not associated or complaint with the baseline '{2}'."
INVALID_TIME = "job_wait_timeout {0} is not valid."
REMEDIATE_MSG = "Successfully completed the remediate operation."
JOB_FAILURE_PROGRESS_MSG = "The initiated task for the configuration compliance baseline has failed."
NO_CAPABLE_DEVICES = "Target {0} contains devices which cannot be used for a baseline compliance operation."
+
+
def validate_identifiers(available_values, requested_values, identifier_types, module):
    """Fail the module when any requested id/tag is absent from the available set."""
    missing = set(requested_values) - set(available_values)
    if missing:
        invalid = ",".join(str(item) for item in missing)
        module.fail_json(msg=INVALID_IDENTIFIER.format(identifier=identifier_types, invalid_val=invalid))
+
+
def get_identifiers(available_identifiers_map, requested_values):
    """Map requested names/service tags back to ids via the id -> name mapping.

    Works for device service tags, group names and baseline names alike.
    """
    return [identifier for identifier, label in available_identifiers_map.items()
            if label in requested_values]
+
+
def get_template_details(module, rest_obj):
    """Look up the template by id (preferred) or by name and return its record.

    Fails the module when no matching template exists.
    """
    template_id = module.params.get('template_id')
    if template_id:
        identifier, value = 'Id', template_id
        query = {"$filter": "Id eq {0}".format(template_id)}
    else:
        identifier, value = 'Name', module.params.get('template_name')
        query = {"$filter": "Name eq '{0}'".format(value)}
    resp = rest_obj.invoke_request('GET', TEMPLATE_VIEW, query_param=query)
    if resp.success and resp.json_data.get('value'):
        # The $filter match can be loose; confirm the attribute matches exactly.
        for template in resp.json_data.get('value', []):
            if template.get(identifier) == value:
                return template
    if identifier == 'Id':
        module.fail_json(msg=TEMPLATE_ID_ERROR_MSG.format(template_id=value))
    else:
        module.fail_json(msg=TEMPLATE_NAME_ERROR_MSG.format(template_name=value))
+
+
def get_group_ids(module, rest_obj):
    """Resolve the requested device group names into their group ids."""
    group_names = module.params.get("device_group_names")
    all_groups = rest_obj.get_all_items_with_pagination(GROUP_VIEW)["value"]
    if all_groups:
        id_name_map = {group["Id"]: group["Name"] for group in all_groups}
        validate_identifiers(id_name_map.values(), group_names, "device_group_names", module)
        return get_identifiers(id_name_map, group_names)
    module.fail_json(msg=INVALID_DEVICES.format(identifier="Group"))
    return []
+
+
def get_device_capabilities(devices_list, identifier):
    """Split devices by whether they advertise capability 33 (baseline compliance).

    Result keys hold device ids when *identifier* is "device_ids" and service
    tags otherwise.
    """
    key_field = "Id" if identifier == "device_ids" else "Identifier"
    caps_by_key = {device[key_field]: device.get("DeviceCapabilities", []) for device in devices_list}
    capable, non_capable = [], []
    for key, caps in caps_by_key.items():
        (capable if 33 in caps else non_capable).append(key)
    return {"capable": capable, "non_capable": non_capable}
+
+
def get_device_ids(module, rest_obj):
    """Resolve requested device ids or service tags into device ids.

    Returns the resolved id list together with the capability map of all
    known devices (empty map when nothing was requested).
    """
    requested_ids = module.params.get("device_ids")
    requested_tags = module.params.get("device_service_tags")
    all_devices = rest_obj.get_all_report_details(DEVICE_VIEW)["report_list"]
    if not all_devices:
        module.fail_json(msg=INVALID_DEVICES.format(identifier="Device"))
    id_tag_map = {device["Id"]: device["Identifier"] for device in all_devices}
    resolved, identifier = [], "device_ids"
    if requested_ids:
        validate_identifiers(id_tag_map.keys(), requested_ids, "device_ids", module)
        resolved = requested_ids
    if requested_tags:
        validate_identifiers(id_tag_map.values(), requested_tags, "device_service_tags", module)
        identifier = "device_service_tags"
        resolved = get_identifiers(id_tag_map, requested_tags)
    capability_map = get_device_capabilities(all_devices, identifier) if resolved else {}
    return resolved, capability_map
+
+
def validate_capability(module, device_capability_map):
    """Fail with the list of incompatible devices when any requested device
    lacks the baseline-compliance capability."""
    if module.params.get("device_ids"):
        requested, identifier_types = module.params.get("device_ids"), "device_ids"
    else:
        requested, identifier_types = module.params.get("device_service_tags"), "device_service_tags"
    capable = set(requested) & set(device_capability_map.get("capable", []))
    if not capable or len(capable) != len(requested):
        module.fail_json(msg=NO_CAPABLE_DEVICES.format(identifier_types),
                         incompatible_devices=list(set(requested) - capable))
+
+
def create_payload(module, rest_obj):
    """Build the baseline create/modify payload from the module parameters.

    Devices (by id or service tag) and device groups are merged into a single
    BaselineTargets list; optional template and description are included when
    supplied.
    """
    params = module.params
    targets = []
    if params.get("device_ids") or params.get("device_service_tags"):
        resolved_ids, capability_map = get_device_ids(module, rest_obj)
        # Every requested device must support baseline compliance.
        validate_capability(module, capability_map)
        targets = resolved_ids
    if params.get("device_group_names"):
        targets.extend(get_group_ids(module, rest_obj))
    payload = {"Name": params["names"][0]}
    if params.get("template_id") or params.get("template_name"):
        payload["TemplateId"] = get_template_details(module, rest_obj)["Id"]
    if params.get("description"):
        payload["Description"] = params["description"]
    if targets:
        payload["BaselineTargets"] = [{"Id": target_id} for target_id in targets]
    return payload
+
+
def get_baseline_compliance_info(rest_obj, baseline_identifier_val, attribute="Id"):
    """Return the baseline record whose *attribute* equals the given value.

    OData bookkeeping keys are stripped from the match; an empty dict means
    no baseline matched.
    """
    baselines = rest_obj.get_all_items_with_pagination(COMPLIANCE_BASELINE)["value"]
    for baseline in baselines:
        if baseline[attribute] == baseline_identifier_val:
            for odata_key in ("@odata.type", "@odata.id",
                              "DeviceConfigComplianceReports@odata.navigationLink"):
                baseline.pop(odata_key, None)
            return baseline
    return {}
+
+
def track_compliance_task_completion(rest_obj, baseline_identifier_val, module):
    """
    wait for the compliance configuration task to complete

    Polls the baseline record every 5 seconds until PercentageComplete
    reaches "100" or job_wait_timeout expires; returns the resulting
    message together with the latest baseline record.
    """
    baseline_info = get_baseline_compliance_info(rest_obj, baseline_identifier_val)
    command = module.params["command"]
    if module.params.get("job_wait"):
        wait_time = 5
        retries_count_limit = module.params["job_wait_timeout"] / wait_time
        retries_count = 0
        time.sleep(wait_time)
        # Optimistic success message; downgraded below if the task is still running.
        if command == "create":
            msg = CREATE_MSG
        else:
            msg = MODIFY_MSG
        while retries_count <= retries_count_limit:
            # NOTE(review): PercentageComplete is compared as the string "100"
            # here -- confirm the API never returns it as an integer.
            if baseline_info["PercentageComplete"] == "100":
                break
            retries_count += 1
            time.sleep(wait_time)
            baseline_info = get_baseline_compliance_info(rest_obj, baseline_identifier_val)
        if baseline_info["PercentageComplete"] != "100":
            # Timed out while the task is still running.
            msg = TASK_PROGRESS_MSG
    else:
        # Caller chose not to wait; the task continues in the background.
        msg = TASK_PROGRESS_MSG
    return msg, baseline_info
+
+
def validate_create_baseline_idempotency(module, rest_obj):
    """Exit without change when a baseline with the requested name already exists.

    In check mode, report a pending change when the name is free.
    """
    requested_name = module.params["names"][0]
    existing = get_baseline_compliance_info(rest_obj, requested_name, attribute="Name")
    if existing:
        module.exit_json(msg=BASELINE_CHECK_MODE_CHANGE_MSG.format(name=requested_name), changed=False)
    if module.check_mode:
        module.exit_json(msg=CHECK_MODE_CHANGES_MSG, changed=True)
+
+
def create_baseline(module, rest_obj):
    """
    Create the compliance baseline and report the created record.

    The record is re-read via GET because the POST response leaves many
    attributes null; the follow-up GET returns the fully populated record.
    """
    payload = create_payload(module, rest_obj)
    validate_create_baseline_idempotency(module, rest_obj)
    created = rest_obj.invoke_request('POST', COMPLIANCE_BASELINE, data=payload)
    compliance_id = created.json_data["Id"]
    baseline_info = get_baseline_compliance_info(rest_obj, compliance_id)
    if not module.params.get("job_wait"):
        # Fire-and-forget: the create task keeps running on the appliance.
        module.exit_json(msg=TASK_PROGRESS_MSG, compliance_status=baseline_info, changed=True)
    job_failed, message = rest_obj.job_tracking(baseline_info["TaskId"],
                                               job_wait_sec=module.params["job_wait_timeout"],
                                               sleep_time=5)
    baseline_updated_info = get_baseline_compliance_info(rest_obj, compliance_id)
    if job_failed is True:
        module.fail_json(msg=message, compliance_status=baseline_updated_info, changed=False)
    if "successfully" in message:
        module.exit_json(msg=CREATE_MSG, compliance_status=baseline_updated_info, changed=True)
    module.exit_json(msg=message, compliance_status=baseline_updated_info, changed=False)
+
+
def validate_names(command, module):
    """Fail when multiple baseline names are given for a non-delete command.

    Only the delete command accepts more than one baseline name.
    """
    if command != "delete" and len(module.params["names"]) > 1:
        module.fail_json(msg=NAMES_ERROR)
+
+
def delete_idempotency_check(module, rest_obj):
    """Return the ids of the requested baselines that actually exist.

    Exits early when nothing would be deleted, or (in check mode) when a
    deletion is pending.
    """
    requested_names = module.params["names"]
    baselines = rest_obj.get_all_items_with_pagination(COMPLIANCE_BASELINE)["value"]
    id_name_map = {baseline["Id"]: baseline["Name"] for baseline in baselines}
    existing_names = set(requested_names) & set(id_name_map.values())
    existing_ids = get_identifiers(id_name_map, existing_names)
    if module.check_mode and len(existing_ids) > 0:
        module.exit_json(msg=CHECK_MODE_CHANGES_MSG, changed=True)
    if not existing_ids:
        module.exit_json(msg=CHECK_MODE_NO_CHANGES_MSG, changed=False)
    return existing_ids
+
+
def delete_compliance(module, rest_obj):
    """Delete every requested baseline that exists and report success."""
    baseline_ids = delete_idempotency_check(module, rest_obj)
    rest_obj.invoke_request('POST', DELETE_COMPLIANCE_BASELINE, data={"BaselineIds": baseline_ids})
    module.exit_json(msg=DELETE_MSG, changed=True)
+
+
def compare_payloads(modify_payload, current_payload):
    """Return True when *modify_payload* differs from *current_payload*.

    Nested dict values are compared recursively; a key missing (or None)
    in *current_payload* counts as a difference.
    """
    for key, requested in modify_payload.items():
        existing = None if current_payload is None else current_payload.get(key)
        if existing is None:
            return True
        if isinstance(requested, dict):
            if compare_payloads(requested, existing):
                return True
        elif requested != existing:
            return True
    return False
+
+
def idempotency_check_for_command_modify(current_payload, expected_payload, module):
    """Exit when the modify request would not change the existing baseline.

    :param current_payload: settings currently present on the appliance
    :param expected_payload: settings requested by the user
    :param module: ansible module object
    :return: None
    """
    differs = compare_payloads(expected_payload, current_payload)
    if module.check_mode:
        if differs:
            module.exit_json(msg=CHECK_MODE_CHANGES_MSG, changed=True)
        module.exit_json(msg=CHECK_MODE_NO_CHANGES_MSG, changed=False)
    if not differs:
        module.exit_json(msg=IDEMPOTENCY_MSG, changed=False)
+
+
def modify_baseline(module, rest_obj):
    """Modify an existing baseline and report the updated record.

    Fails when the baseline does not exist, or when the requested new name
    is already taken by another baseline.
    """
    name = module.params["names"][0]
    baseline_info = get_baseline_compliance_info(rest_obj, name, attribute="Name")
    if not any(baseline_info):
        module.fail_json(msg=BASELINE_CHECK_MODE_NOCHANGE_MSG.format(name=name))
    # Payload built from the requested settings; Id comes from the existing record.
    current_payload = create_payload(module, rest_obj)
    current_payload["Id"] = baseline_info["Id"]
    if module.params.get("new_name"):
        new_name = module.params.get("new_name")
        if name != new_name:
            # The target name must not collide with another existing baseline.
            baseline_info_new = get_baseline_compliance_info(rest_obj, new_name, attribute="Name")
            if any(baseline_info_new):
                module.fail_json(msg=BASELINE_CHECK_MODE_CHANGE_MSG.format(name=new_name))
        current_payload["Name"] = new_name
    # Reduce the existing record to the attributes the PUT payload understands.
    required_attributes = ["Id", "Name", "Description", "TemplateId", "BaselineTargets"]
    existing_payload = dict([(key, val) for key, val in baseline_info.items() if key in required_attributes and val])
    if existing_payload.get("BaselineTargets"):
        # Keep only the target ids; the GET returns richer target objects.
        target = [{"Id": item["Id"]} for item in existing_payload["BaselineTargets"]]
        existing_payload["BaselineTargets"] = target
    idempotency_check_for_command_modify(existing_payload, current_payload, module)
    # Requested settings win over the existing ones.
    existing_payload.update(current_payload)
    baseline_update_uri = COMPLIANCE_BASELINE + "({baseline_id})".format(baseline_id=existing_payload["Id"])
    resp = rest_obj.invoke_request('PUT', baseline_update_uri, data=existing_payload)
    data = resp.json_data
    compliance_id = data["Id"]
    baseline_info = get_baseline_compliance_info(rest_obj, compliance_id)
    if module.params.get("job_wait"):
        job_failed, message = rest_obj.job_tracking(baseline_info["TaskId"],
                                                    job_wait_sec=module.params["job_wait_timeout"], sleep_time=5)
        # Re-read after the job so the reported record reflects the final state.
        baseline_updated_info = get_baseline_compliance_info(rest_obj, compliance_id)
        if job_failed is True:
            module.fail_json(msg=message, compliance_status=baseline_updated_info, changed=False)
        else:
            if "successfully" in message:
                module.exit_json(msg=MODIFY_MSG, compliance_status=baseline_updated_info, changed=True)
            else:
                module.exit_json(msg=message, compliance_status=baseline_updated_info, changed=False)
    else:
        module.exit_json(msg=TASK_PROGRESS_MSG, compliance_status=baseline_info, changed=True)
+
+
def get_ome_version(rest_obj):
    """Return the OME appliance version string from the application info API."""
    info = rest_obj.invoke_request('GET', OME_INFO)
    return info.json_data["Version"]
+
+
def validate_remediate_idempotency(module, rest_obj):
    """Determine which devices in the baseline need remediation.

    Exits with "no changes" when everything is compliant, reports a pending
    change in check mode, and fails when a requested device is not part of
    the baseline's compliance report.

    :return: (noncompliant device id list, baseline record)
    """
    name = module.params["names"][0]
    baseline_info = get_baseline_compliance_info(rest_obj, name, attribute="Name")
    if not any(baseline_info):
        module.fail_json(msg=BASELINE_CHECK_MODE_NOCHANGE_MSG.format(name=name))
    # Validates the requested device ids/tags against the appliance inventory;
    # the resolved values themselves are not needed here.
    get_device_ids(module, rest_obj)
    compliance_reports = rest_obj.get_all_items_with_pagination(CONFIG_COMPLIANCE_URI.format(baseline_info["Id"]))
    device_id_list = module.params.get("device_ids")
    device_service_tags_list = module.params.get("device_service_tags")
    if device_id_list:
        compliance_report_map = dict([(item["Id"], item["ComplianceStatus"]) for item in compliance_reports["value"]])
        if not any(compliance_report_map):
            module.exit_json(msg=CHECK_MODE_NO_CHANGES_MSG)
        invalid_values = list(set(device_id_list) - set(compliance_report_map.keys()))
        if invalid_values:
            # BUGFIX: pass the message via the msg keyword; the original positional
            # call breaks on ansible-core versions where fail_json(**kwargs) only
            # accepts keyword arguments, and is inconsistent with every other
            # fail_json call in this module.
            module.fail_json(
                msg=INVALID_COMPLIANCE_IDENTIFIER.format("device_ids", ",".join(map(str, invalid_values)), name))
        report_devices = list(set(device_id_list) & set(compliance_report_map.keys()))
        # ComplianceStatus is reported as "NONCOMPLIANT" or as the enum value 2
        # depending on the OME release; accept both.
        noncomplaint_devices = [device for device in report_devices if compliance_report_map[device] == "NONCOMPLIANT"
                                or compliance_report_map[device] == 2]
    elif device_service_tags_list:
        compliance_report_map = dict(
            [(item["ServiceTag"], item["ComplianceStatus"]) for item in compliance_reports["value"]])
        if not any(compliance_report_map):
            module.exit_json(msg=CHECK_MODE_NO_CHANGES_MSG)
        invalid_values = list(set(device_service_tags_list) - set(compliance_report_map.keys()))
        if invalid_values:
            # BUGFIX: msg keyword added here as well (see above).
            module.fail_json(
                msg=INVALID_COMPLIANCE_IDENTIFIER.format("device_service_tags",
                                                         ",".join(map(str, invalid_values)), name))
        report_devices = list(set(device_service_tags_list) & set(compliance_report_map.keys()))
        service_tag_id_map = dict(
            [(item["ServiceTag"], item["Id"]) for item in compliance_reports["value"]])
        noncomplaint_devices = [service_tag_id_map[device] for device in report_devices
                                if compliance_report_map[device] == "NONCOMPLIANT"
                                or compliance_report_map[device] == 2]
    else:
        # No explicit targets: remediate every noncompliant device in the baseline.
        compliance_report_map = dict([(item["Id"], item["ComplianceStatus"]) for item in compliance_reports["value"]])
        if not any(compliance_report_map):
            module.exit_json(msg=CHECK_MODE_NO_CHANGES_MSG)
        noncomplaint_devices = [device for device, compliance_status in compliance_report_map.items() if
                                compliance_status == "NONCOMPLIANT" or compliance_status == 2]
    if len(noncomplaint_devices) == 0:
        module.exit_json(msg=CHECK_MODE_NO_CHANGES_MSG)
    if module.check_mode and noncomplaint_devices:
        module.exit_json(msg=CHECK_MODE_CHANGES_MSG, changed=True)
    return noncomplaint_devices, baseline_info
+
+
def create_remediate_payload(noncomplaint_devices, baseline_info, rest_obj):
    """Build the remediate request body for the baseline.

    OME versions matching 1.x-3.4 expect the device list under "TargetIds";
    later versions use "DeviceIds".
    """
    payload = {
        "Id": baseline_info["Id"],
        "Schedule": {
            "RunNow": True,
            "RunLater": False
        }
    }
    legacy_versions = re.compile(r'(1|2|3)\.(0|1|2|3|4)\.?')
    device_key = "TargetIds" if legacy_versions.match(get_ome_version(rest_obj)) else "DeviceIds"
    payload[device_key] = noncomplaint_devices
    return payload
+
+
def remediate_baseline(module, rest_obj):
    """Trigger remediation for all noncompliant devices and report the job."""
    noncomplaint_devices, baseline_info = validate_remediate_idempotency(module, rest_obj)
    payload = create_remediate_payload(noncomplaint_devices, baseline_info, rest_obj)
    job_id = rest_obj.invoke_request('POST', REMEDIATE_BASELINE, data=payload).json_data
    if not module.params.get("job_wait"):
        module.exit_json(msg=TASK_PROGRESS_MSG, job_id=job_id, changed=True)
    job_failed, message = rest_obj.job_tracking(job_id, job_wait_sec=module.params["job_wait_timeout"])
    if job_failed is True:
        module.fail_json(msg=message, job_id=job_id, changed=False)
    if "successfully" in message:
        module.exit_json(msg=REMEDIATE_MSG, job_id=job_id, changed=True)
    module.exit_json(msg=message, job_id=job_id, changed=False)
+
+
def validate_job_time(command, module):
    """Fail when waiting is requested with a non-positive job_wait_timeout.

    The delete command never waits, so it is exempt from the check.
    """
    if not module.params["job_wait"] or command == "delete":
        return
    timeout = module.params["job_wait_timeout"]
    if timeout <= 0:
        module.fail_json(msg=INVALID_TIME.format(timeout))
+
+
def compliance_operation(module, rest_obj):
    """Validate common inputs and dispatch to the requested command handler."""
    command = module.params.get("command")
    validate_names(command, module)
    validate_job_time(command, module)
    # Each handler terminates the module via exit_json/fail_json.
    handlers = {
        "create": create_baseline,
        "modify": modify_baseline,
        "delete": delete_compliance,
        "remediate": remediate_baseline,
    }
    handler = handlers.get(command)
    if handler:
        handler(module, rest_obj)
+
+
def main():
    """Entry point: build the argument spec, run the requested operation and
    translate transport errors into module results."""
    specs = {
        "command": {"default": "create",
                    "choices": ['create', 'modify', 'delete', 'remediate']},
        "names": {"required": True, "type": 'list', "elements": 'str'},
        "template_name": {"type": 'str'},
        "template_id": {"type": 'int'},
        "device_ids": {"required": False, "type": 'list', "elements": 'int'},
        "device_service_tags": {"required": False, "type": 'list', "elements": 'str'},
        "device_group_names": {"required": False, "type": 'list', "elements": 'str'},
        "description": {"type": 'str'},
        "job_wait": {"required": False, "type": 'bool', "default": True},
        "job_wait_timeout": {"required": False, "type": 'int', "default": 10800},
        "new_name": {"type": 'str'},
    }
    # Shared OME connection options (hostname, credentials, port, ...).
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[
            ['command', 'create', ['template_name', 'template_id'], True],
            ['command', 'remediate', ['device_ids', 'device_service_tags', 'job_wait', 'job_wait_timeout'], True],
            ['command', 'modify',
             ['new_name', 'description', 'template_name', 'template_id', 'device_ids', 'device_service_tags',
              'device_group_names'], True],
        ],
        mutually_exclusive=[
            ('device_ids', 'device_service_tags'),
            ('device_ids', 'device_group_names'),
            ('device_service_tags', 'device_group_names'),
            ('template_id', 'template_name')],

        supports_check_mode=True)
    try:
        # req_session=True keeps a single authenticated session for all REST calls.
        with RestOME(module.params, req_session=True) as rest_obj:
            compliance_operation(module, rest_obj)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # A connection-level failure marks the host unreachable rather than failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py
new file mode 100644
index 000000000..d96cd3769
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 6.1.0
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ome_configuration_compliance_info
+short_description: Device compliance report for devices managed in OpenManage Enterprise
+version_added: "3.2.0"
+description: This module allows the generation of a compliance report for a specific device
+ or all devices in a configuration compliance baseline.
+extends_documentation_fragment:
+ - dellemc.openmanage.oment_auth_options
+options:
+ baseline:
+ required: True
+ description:
+ - The name of the created baseline.
+ - A compliance report is generated even when the template is not associated with the baseline.
+ type: str
+ device_id:
+ required: False
+ description:
+ - The ID of the target device which is associated with the I(baseline).
+ type: int
+ device_service_tag:
+ required: False
+ description:
+ - The device service tag of the target device associated with the I(baseline).
+ - I(device_service_tag) is mutually exclusive with I(device_id).
+ type: str
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen A (@felixs88)"
+ - "Kritika Bhateja (@Kritika-Bhateja)"
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Retrieve the compliance report of all of the devices in the specified configuration compliance baseline.
+ dellemc.openmanage.ome_configuration_compliance_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline: baseline_name
+
+- name: Retrieve the compliance report for a specific device associated with the baseline using the device ID.
+ dellemc.openmanage.ome_configuration_compliance_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline: baseline_name
+ device_id: 10001
+
+- name: Retrieve the compliance report for a specific device associated with the baseline using the device service tag.
+ dellemc.openmanage.ome_configuration_compliance_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline: baseline_name
+ device_service_tag: 2HFGH3
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Over all compliance report status.
+ returned: on error
+ sample: "Unable to complete the operation because the entered target baseline name 'baseline' is invalid."
+compliance_info:
+ type: list
+ description: Returns the compliance report information.
+ returned: success
+ sample: [{
+ "ComplianceAttributeGroups": [{
+ "Attributes": [],
+ "ComplianceReason": "One or more attributes on the target device(s) does not match the compliance template.",
+ "ComplianceStatus": 2,
+ "ComplianceSubAttributeGroups": [{
+ "Attributes": [{
+ "AttributeId": 75369,
+ "ComplianceReason": "Attribute has different value from template",
+ "ComplianceStatus": 3,
+ "CustomId": 0,
+ "Description": null,
+ "DisplayName": "Workload Profile",
+ "ExpectedValue": "HpcProfile",
+ "Value": "NotAvailable"
+ }],
+ "ComplianceReason": "One or more attributes on the target device(s) does not match the compliance template.",
+ "ComplianceStatus": 2,
+ "ComplianceSubAttributeGroups": [],
+ "DisplayName": "System Profile Settings",
+ "GroupNameId": 1
+ }],
+ "DisplayName": "BIOS",
+ "GroupNameId": 1
+ }],
+ "ComplianceStatus": "NONCOMPLIANT",
+ "DeviceName": "WIN-PLOV8MPIP40",
+ "DeviceType": 1000,
+ "Id": 25011,
+ "InventoryTime": "2021-03-18 00:01:57.809771",
+ "Model": "PowerEdge R7525",
+ "ServiceTag": "JHMBX53"
+ }]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
# OME REST API routes: all baselines, the per-baseline device report list,
# and the per-device compliance details respectively.
BASELINE_URI = "TemplateService/Baselines"
CONFIG_COMPLIANCE_URI = "TemplateService/Baselines({0})/DeviceConfigComplianceReports"
COMPLIANCE_URI = "TemplateService/Baselines({0})/DeviceConfigComplianceReports({1})/DeviceComplianceDetails"
+
+
def validate_device(module, report, device_id=None, service_tag=None, base_id=None):
    """Return the device id matching *device_id* or *service_tag* in the report.

    Fails the module when the device is not part of the baseline report.
    """
    found = False
    for entry in report.get("value"):
        if entry["Id"] == device_id:
            found = True
            break
        if entry["ServiceTag"] == service_tag:
            device_id = entry["Id"]
            found = True
            break
    if not found:
        identifier = device_id if device_id is not None else service_tag
        module.fail_json(msg="Unable to complete the operation because the entered "
                             "target device id or service tag '{0}' is invalid.".format(identifier))
    return device_id
+
+
def get_baseline_id(module, baseline_name, rest_obj):
    """Return (baseline id, template id) for the named baseline.

    Fails the module when the baseline name is unknown.
    """
    report = rest_obj.get_all_report_details(BASELINE_URI)
    for baseline in report["report_list"]:
        if baseline["Name"] == baseline_name:
            return baseline["Id"], baseline["TemplateId"]
    module.fail_json(msg="Unable to complete the operation because the entered "
                         "target baseline name '{0}' is invalid.".format(baseline_name))
    return None, None
+
+
def compliance_report(module, rest_obj):
    """Collect the compliance report for the baseline, optionally narrowed to one device."""
    baseline_name = module.params.get("baseline")
    device_id = module.params.get("device_id")
    device_service_tag = module.params.get("device_service_tag")
    baseline_id, template_id = get_baseline_id(module, baseline_name, rest_obj)
    if device_id:
        # Direct per-device lookup returns the attribute groups themselves.
        detail = rest_obj.invoke_request("GET", COMPLIANCE_URI.format(baseline_id, device_id))
        if not detail.json_data.get("ComplianceAttributeGroups") and template_id == 0:
            module.fail_json(msg="The compliance report of the device not found as "
                                 "there is no template associated with the baseline.")
        return detail.json_data.get("ComplianceAttributeGroups")
    baseline_report = rest_obj.get_all_items_with_pagination(CONFIG_COMPLIANCE_URI.format(baseline_id))
    if device_service_tag:
        device_id = validate_device(module, baseline_report, device_id=device_id,
                                    service_tag=device_service_tag, base_id=baseline_id)
        device_compliance = [entry for entry in baseline_report.get("value") if entry['Id'] in [device_id]]
    else:
        device_compliance = baseline_report.get("value")
    # Enrich each summary row with its detailed attribute groups.
    for entry in device_compliance or []:
        detail = rest_obj.invoke_request("GET", COMPLIANCE_URI.format(baseline_id, entry["Id"]))
        entry["ComplianceAttributeGroups"] = detail.json_data.get("ComplianceAttributeGroups")
    return device_compliance
+
+
def main():
    """Entry point: collect the compliance report and return it as module output."""
    specs = {
        "baseline": {"required": True, "type": "str"},
        "device_id": {"required": False, "type": "int"},
        "device_service_tag": {"required": False, "type": "str"},
    }
    # Shared OME connection options (hostname, credentials, port, ...).
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        mutually_exclusive=[["device_id", "device_service_tag"]],
        supports_check_mode=True
    )
    try:
        # req_session=True keeps a single authenticated session for all REST calls.
        with RestOME(module.params, req_session=True) as rest_obj:
            report = compliance_report(module, rest_obj)
            module.exit_json(compliance_info=report)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # A connection-level failure marks the host unreachable rather than failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py
new file mode 100644
index 000000000..56c1def60
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py
@@ -0,0 +1,526 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 6.1.0
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_device_group
+short_description: Add or remove device(s) from a static device group on OpenManage Enterprise
+version_added: "3.3.0"
+description: This module allows to add or remove device(s) from a static device group on OpenManage Enterprise.
+extends_documentation_fragment:
+ - dellemc.openmanage.oment_auth_options
+options:
+ state:
+ type: str
+ description:
+ - C(present) allows to add the device(s) to a static device group.
+ - C(absent) allows to remove the device(s) from a static device group.
+ choices: [present, absent]
+ default: present
+ name:
+ type: str
+ description:
+ - Name of the static group.
+ - I(name) is mutually exclusive with I(group_id).
+ group_id:
+ type: int
+ description:
+      - ID of the static device group.
+ - I(group_id) is mutually exclusive with I(name).
+ device_ids:
+ type: list
+ elements: int
+ description:
+ - List of ID(s) of the device(s) to be added or removed from the device group.
+ - I(device_ids) is mutually exclusive with I(device_service_tags) and I(ip_addresses).
+ device_service_tags:
+ type: list
+ elements: str
+ description:
+ - List of service tag(s) of the device(s) to be added or removed from the device group.
+ - I(device_service_tags) is mutually exclusive with I(device_ids) and I(ip_addresses).
+ ip_addresses:
+ type: list
+ elements: str
+ description:
+ - List of IPs of the device(s) to be added or removed from the device group.
+ - I(ip_addresses) is mutually exclusive with I(device_ids) and I(device_service_tags).
+ - "Supported IP address range formats:"
+ - " - 192.35.0.1"
+ - " - 10.36.0.0-192.36.0.255"
+ - " - 192.37.0.0/24"
+ - " - fe80::ffff:ffff:ffff:ffff"
+ - " - fe80::ffff:192.0.2.0/125"
+ - " - fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff"
+ - C(NOTE) Hostname is not supported.
+ - C(NOTE) I(ip_addresses) requires python's netaddr packages to work on IP Addresses.
+  - C(NOTE) This module reports success even if one of the IP addresses provided in the I(ip_addresses) list is
+    available in OpenManage Enterprise. The module reports failure only if none of the IP addresses provided in the
+    list are available in OpenManage Enterprise.
+requirements:
+ - "python >= 3.8.6"
+ - "netaddr >= 0.7.19"
+author:
+ - "Felix Stephen (@felixs88)"
+ - "Sajna Shetty(@Sajna-Shetty)"
+ - "Abhishek Sinha (@Abhishek-Dell)"
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Add devices to a static device group by using the group name and device IDs
+ dellemc.openmanage.ome_device_group:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Storage Services"
+ device_ids:
+ - 11111
+ - 11112
+ - 11113
+
+- name: Add devices to a static device group by using the group name and device service tags
+ dellemc.openmanage.ome_device_group:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Storage Services"
+ device_service_tags:
+ - GHRT2RL
+ - KJHDF3S
+ - LKIJNG6
+
+- name: Add devices to a static device group by using the group ID and device service tags
+ dellemc.openmanage.ome_device_group:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ group_id: 12345
+ device_service_tags:
+ - GHRT2RL
+ - KJHDF3S
+
+- name: Add devices to a static device group by using the group name and IPv4 addresses
+ dellemc.openmanage.ome_device_group:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Storage Services"
+ ip_addresses:
+ - 192.35.0.1
+ - 192.35.0.5
+
+- name: Add devices to a static device group by using the group ID and IPv6 addresses
+ dellemc.openmanage.ome_device_group:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ group_id: 12345
+ ip_addresses:
+ - fe80::ffff:ffff:ffff:ffff
+ - fe80::ffff:ffff:ffff:2222
+
+- name: Add devices to a static device group by using the group ID and supported IPv4 and IPv6 address formats.
+ dellemc.openmanage.ome_device_group:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ group_id: 12345
+ ip_addresses:
+ - 192.35.0.1
+ - 10.36.0.0-192.36.0.255
+ - 192.37.0.0/24
+ - fe80::ffff:ffff:ffff:ffff
+ - ::ffff:192.0.2.0/125
+ - fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff
+
+- name: Remove devices from a static device group by using the group name and device IDs
+ dellemc.openmanage.ome_device_group:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "absent"
+ name: "Storage Services"
+ device_ids:
+ - 11111
+ - 11112
+ - 11113
+
+- name: Remove devices from a static device group by using the group name and device service tags
+ dellemc.openmanage.ome_device_group:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "absent"
+ name: "Storage Services"
+ device_service_tags:
+ - GHRT2RL
+ - KJHDF3S
+ - LKIJNG6
+
+- name: Remove devices from a static device group by using the group ID and device service tags
+ dellemc.openmanage.ome_device_group:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "absent"
+ group_id: 12345
+ device_service_tags:
+ - GHRT2RL
+ - KJHDF3S
+
+- name: Remove devices from a static device group by using the group name and IPv4 addresses
+ dellemc.openmanage.ome_device_group:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "absent"
+ name: "Storage Services"
+ ip_addresses:
+ - 192.35.0.1
+ - 192.35.0.5
+
+- name: Remove devices from a static device group by using the group ID and IPv6 addresses
+ dellemc.openmanage.ome_device_group:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "absent"
+ group_id: 12345
+ ip_addresses:
+ - fe80::ffff:ffff:ffff:ffff
+ - fe80::ffff:ffff:ffff:2222
+
+- name: Remove devices from a static device group by using the group ID and supported IPv4 and IPv6 address formats.
+ dellemc.openmanage.ome_device_group:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "absent"
+ group_id: 12345
+ ip_addresses:
+ - 192.35.0.1
+ - 10.36.0.0-192.36.0.255
+ - 192.37.0.0/24
+ - fe80::ffff:ffff:ffff:ffff
+ - ::ffff:192.0.2.0/125
+ - fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff
+
+"""
+
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall status of the device group settings.
+ returned: always
+ sample:
+ - "Successfully added member(s) to the device group."
+group_id:
+ type: int
+ description: ID of the group.
+ returned: success
+ sample: 21078
+ip_addresses_added:
+  type: list
+  description: IP addresses which are added to the device group.
+  returned: success
+  sample: ["192.35.0.1"]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+"""
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+try:
+ from netaddr import IPAddress, IPNetwork, IPRange
+ from netaddr.core import AddrFormatError
+
+ HAS_NETADDR = True
+except ImportError:
+ HAS_NETADDR = False
+
+GROUP_URI = "GroupService/Groups"
+DEVICE_URI = "DeviceService/Devices"
+ADD_MEMBER_URI = "GroupService/Actions/GroupService.AddMemberDevices"
+REMOVE_MEMBER_URI = "GroupService/Actions/GroupService.RemoveMemberDevices"
+ADD_STATIC_GROUP_MESSAGE = "Devices can be added only to the static device groups created using OpenManage Enterprise."
+REMOVE_STATIC_GROUP_MESSAGE = "Devices can be removed only from the static device groups created using OpenManage Enterprise."
+NETADDR_ERROR = "The module requires python's netaddr be installed on the ansible controller to work on IP Addresses."
+INVALID_IP_FORMAT = "The format {0} of the IP address provided is not supported or invalid."
+IP_NOT_EXISTS = "The IP addresses provided do not exist in OpenManage Enterprise."
+
+
+def validate_group(group_resp, module, identifier, identifier_val):
+    """Fail the module unless *group_resp* describes a usable static group.
+
+    :param group_resp: group resource dict from OME; falsy when not found.
+    :param module: AnsibleModule used to report failures.
+    :param identifier: label used in the error message ("name" or "Id").
+    :param identifier_val: the user-supplied value for that identifier.
+    """
+    if not group_resp:
+        module.fail_json(msg="Unable to complete the operation because the entered "
+                         "target group {identifier} '{val}' is invalid.".format(identifier=identifier,
+                                                                                val=identifier_val))
+    system_groups = group_resp["TypeId"]
+    membership_id = group_resp["MembershipTypeId"]
+    # TypeId 3000 presumably marks user-created groups and MembershipTypeId 24
+    # dynamic membership -- TODO confirm against the OME GroupService API docs.
+    # Note: the second conjunct is redundant (membership_id == 24 suffices).
+    if system_groups != 3000 or (system_groups == 3000 and membership_id == 24):
+        msg = ADD_STATIC_GROUP_MESSAGE if module.params.get("state", "present") == "present" else \
+            REMOVE_STATIC_GROUP_MESSAGE
+        module.fail_json(msg=msg)
+
+
+def get_group_id(rest_obj, module):
+    """Resolve the target group's ID from either I(name) or I(group_id).
+
+    Validates that the group exists and is a static group (via
+    validate_group); fails the module otherwise.
+    :return: int group ID.
+    """
+    group_name = module.params.get("name")
+    group_id = module.params.get("group_id")
+    if group_name is not None:
+        # Look the group up by name using an OData $filter query.
+        group_resp = rest_obj.invoke_request("GET", GROUP_URI,
+                                             query_param={"$filter": "Name eq '{0}'".format(group_name)})
+        value = group_resp.json_data.get("value")
+        if value:
+            value = value[0]
+        else:
+            value = []
+        validate_group(value, module, "name", group_name)
+        group_id = value["Id"]
+
+    else:
+        # Direct resource fetch by ID; a 404 surfaces as HTTPError, which is
+        # reported as an invalid group via validate_group({}).
+        uri = GROUP_URI + "(" + str(group_id) + ")"
+        try:
+            group_resp = rest_obj.invoke_request("GET", uri)
+            validate_group(group_resp.json_data, module, "Id", group_id)
+        except HTTPError:
+            validate_group({}, module, "Id", group_id)
+    return group_id
+
+
+def get_all_ips(ip_addresses, module):
+    """Parse user-supplied IP strings into netaddr objects.
+
+    Accepts CIDR ("a/b" -> IPNetwork), a single range ("a-b" -> IPRange) or a
+    plain address (-> IPAddress). Fails the module on any unparsable entry.
+    :return: list of IPAddress/IPNetwork/IPRange objects.
+    """
+    ip_addresses_list = []
+    for ip in ip_addresses:
+        try:
+            if "/" in ip:
+                cidr_list = IPNetwork(ip)
+                ip_addresses_list.append(cidr_list)
+            elif "-" in ip and ip.count("-") == 1:
+                # Exactly one dash is treated as "start-end"; more dashes fall
+                # through to IPAddress and fail with a clear format error.
+                range_addr = ip.split("-")
+                range_list = IPRange(range_addr[0], range_addr[1])
+                ip_addresses_list.append(range_list)
+            else:
+                single_ip = IPAddress(ip)
+                ip_addresses_list.append(single_ip)
+        except (AddrFormatError, ValueError):
+            module.fail_json(msg=INVALID_IP_FORMAT.format(ip))
+    return ip_addresses_list
+
+
+def get_device_id_from_ip(ip_addresses, device_list, module):
+    """Map OME device IDs to the requested IPs they match.
+
+    :param ip_addresses: parsed netaddr objects from get_all_ips.
+    :param device_list: OME device report list.
+    :return: dict {device_id: matched_ip_string}; fails the module when no
+        requested IP matches any device (per the documented best-effort
+        semantics, a single match is enough to proceed).
+    """
+    # First management interface's NetworkAddress is taken as the device IP.
+    ip_map = dict(
+        [(each_device["DeviceManagement"][0]["NetworkAddress"], each_device["Id"]) for each_device in device_list
+         if each_device["DeviceManagement"]])
+    device_id_list_map = {}
+    for available_ip, device_id in ip_map.items():
+        for ip_formats in ip_formats_loop_guard(ip_addresses) if False else ip_addresses:
+            if isinstance(ip_formats, IPAddress):
+                try:
+                    ome_ip = IPAddress(available_ip)
+                except AddrFormatError:
+                    # OME may report IPv6 addresses wrapped in brackets, e.g.
+                    # "[fe80::1]" -- strip them and retry (presumably; confirm).
+                    ome_ip = IPAddress(available_ip.replace(']', '').replace('[', ''))
+                if ome_ip == ip_formats:
+                    device_id_list_map.update({device_id: str(ip_formats)})
+            if not isinstance(ip_formats, IPAddress):
+                # Same normalization for range/network comparisons.
+                try:
+                    ome_ip = IPAddress(available_ip)
+                except AddrFormatError:
+                    ome_ip = IPAddress(available_ip.replace(']', '').replace('[', ''))
+                if ome_ip in ip_formats:
+                    device_id_list_map.update({device_id: str(ome_ip)})
+    if len(device_id_list_map) == 0:
+        module.fail_json(msg=IP_NOT_EXISTS)
+    return device_id_list_map
+
+
+def get_device_id(rest_obj, module):
+    """Resolve the target devices from ids, service tags or IP addresses.
+
+    :return: tuple (devices, key) where key is "Id", "DeviceServiceTag" or
+        "IPAddresses"; for the IP case *devices* is a {device_id: ip} dict,
+        otherwise a list of device IDs.
+    """
+    device_id_list = module.params.get("device_ids")
+    device_tag_list = module.params.get("device_service_tags")
+    ip_addresses = module.params.get("ip_addresses")
+    device_list = rest_obj.get_all_report_details(DEVICE_URI)
+    invalid, each_device_list, each_tag_to_id = [], [], []
+    if device_id_list or device_tag_list:
+        if device_id_list:
+            key = "Id"
+            each_device_list = device_id_list
+        elif device_tag_list:
+            key = "DeviceServiceTag"
+            each_device_list = device_tag_list
+
+        for each in each_device_list:
+            each_device = list(filter(lambda d: d[key] in [each], device_list["report_list"]))
+            if key == "DeviceServiceTag" and each_device:
+                # Collect the numeric ID that corresponds to the service tag.
+                each_tag_to_id.append(each_device[0]["Id"])
+            if not each_device:
+                invalid.append(str(each))
+        if invalid:
+            # Unlike the IP path, any invalid id/tag is a hard failure.
+            value = "id" if key == "Id" else "service tag"
+            module.fail_json(msg="Unable to complete the operation because the entered "
+                                 "target device {0}(s) '{1}' are invalid.".format(value, ",".join(set(invalid))))
+        if each_tag_to_id:
+            each_device_list = each_tag_to_id
+    else:
+        all_ips = get_all_ips(ip_addresses, module)
+        each_device_list = get_device_id_from_ip(all_ips, device_list["report_list"], module)
+        key = "IPAddresses"
+    return each_device_list, key
+
+
+def add_member_to_group(module, rest_obj, group_id, device_id, key):
+    """Add the devices not already in the group; idempotent and check_mode aware.
+
+    :param device_id: list of device IDs, or {device_id: ip} dict when
+        key == "IPAddresses" (as produced by get_device_id).
+    :return: tuple (REST response, list of IP strings actually added).
+    """
+    group_device = rest_obj.get_all_report_details("{0}({1})/Devices".format(GROUP_URI, group_id))
+    device_exists, device_not_exists, added_ips = [], [], []
+    if key != "IPAddresses":
+        # Split requested devices into already-members vs. to-be-added.
+        for each in device_id:
+            each_device = list(filter(lambda d: d["Id"] in [each], group_device["report_list"]))
+            if each_device:
+                tag_or_id = each_device[0][key] if key == "DeviceServiceTag" else each
+                device_exists.append(str(tag_or_id))
+            else:
+                device_not_exists.append(each)
+    else:
+        already_existing_id = []
+        for device in group_device["report_list"]:
+            if device["Id"] in device_id:
+                device_exists.append(device_id[device["Id"]])
+                already_existing_id.append(device["Id"])
+        device_not_exists = list(set(device_id.keys()) - set(already_existing_id))
+        # Keep the IP strings of newly added devices for the module result.
+        added_ips = [ip for d_id, ip in device_id.items() if d_id in device_not_exists]
+    if module.check_mode and device_not_exists:
+        module.exit_json(msg="Changes found to be applied.", changed=True, group_id=group_id)
+    elif module.check_mode and not device_not_exists:
+        module.exit_json(msg="No changes found to be applied.", group_id=group_id)
+
+    if device_exists and not device_not_exists:
+        # Everything requested is already a member: nothing to do.
+        module.exit_json(
+            msg="No changes found to be applied.",
+            group_id=group_id
+        )
+    payload = {"GroupId": group_id, "MemberDeviceIds": device_not_exists}
+    response = rest_obj.invoke_request("POST", ADD_MEMBER_URI, data=payload)
+    return response, added_ips
+
+
+def get_current_member_of_group(rest_obj, group_id):
+    """Return the list of device IDs that are currently members of the group."""
+    group_device = rest_obj.get_all_report_details("{0}({1})/Devices".format(GROUP_URI, group_id))
+
+    device_id_list = [each["Id"] for each in group_device["report_list"]]
+    return device_id_list
+
+
+def remove_member_from_group(module, rest_obj, group_id, device_id, current_device_list):
+    """Remove the requested devices that are actual group members.
+
+    Idempotent: exits with "No changes" when none of the requested devices
+    are in the group; honors check_mode.
+    :return: REST response of the remove action.
+    """
+    # Only devices that are really members are sent to the remove action.
+    payload_device_list = [each_id for each_id in device_id if each_id in current_device_list]
+
+    if module.check_mode and payload_device_list:
+        module.exit_json(msg="Changes found to be applied.", changed=True, group_id=group_id)
+
+    if not payload_device_list:
+        module.exit_json(msg="No changes found to be applied.", group_id=group_id)
+
+    payload = {"GroupId": group_id, "MemberDeviceIds": payload_device_list}
+    response = rest_obj.invoke_request("POST", REMOVE_MEMBER_URI, data=payload)
+    return response
+
+
+def main():
+    """Module entry point: add/remove devices in a static OME device group."""
+    specs = {
+        "name": {"type": "str"},
+        "group_id": {"type": "int"},
+        "state": {"required": False, "type": "str", "choices": ["present", "absent"], "default": "present"},
+        "device_service_tags": {"required": False, "type": "list", "elements": 'str'},
+        "device_ids": {"required": False, "type": "list", "elements": 'int'},
+        "ip_addresses": {"required": False, "type": "list", "elements": 'str'},
+    }
+    specs.update(ome_auth_params)
+    module = AnsibleModule(
+        argument_spec=specs,
+        required_if=(
+            ["state", "present", ("device_ids", "device_service_tags", "ip_addresses"), True],
+        ),
+        mutually_exclusive=(
+            ("name", "group_id"),
+            ("device_ids", "device_service_tags", "ip_addresses"),
+        ),
+        required_one_of=[("name", "group_id"),
+                         ("device_ids", "device_service_tags", "ip_addresses")],
+        supports_check_mode=True
+    )
+
+    try:
+        # netaddr is only needed for the ip_addresses path; fail early if absent.
+        if module.params.get("ip_addresses") and not HAS_NETADDR:
+            module.fail_json(msg=NETADDR_ERROR)
+        with RestOME(module.params, req_session=True) as rest_obj:
+            group_id = get_group_id(rest_obj, module)
+            device_id, key = get_device_id(rest_obj, module)
+            if module.params["state"] == "present":
+                response, added_ips = add_member_to_group(module, rest_obj, group_id, device_id, key)
+                if added_ips:
+                    module.exit_json(msg="Successfully added member(s) to the device group.",
+                                     group_id=group_id, changed=True, ip_addresses_added=added_ips)
+                module.exit_json(msg="Successfully added member(s) to the device group.",
+                                 group_id=group_id, changed=True)
+            else:
+                current_device_list = get_current_member_of_group(rest_obj, group_id)
+                resp = remove_member_from_group(module, rest_obj, group_id, device_id, current_device_list)
+                module.exit_json(msg="Successfully removed member(s) from the device group.", changed=True)
+    except HTTPError as err:
+        module.fail_json(msg=str(err), error_info=json.load(err))
+    except URLError as err:
+        # Network-level failure: mark the host unreachable instead of failed.
+        module.exit_json(msg=str(err), unreachable=True)
+    except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError,
+            IndexError, KeyError, OSError) as err:
+        module.fail_json(msg=str(err))
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_info.py
new file mode 100644
index 000000000..846dd5e82
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_info.py
@@ -0,0 +1,433 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.1.0
+# Copyright (C) 2019-2022 Dell Inc.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
+# Other trademarks may be trademarks of their respective owners.
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_device_info
+short_description: Retrieves the information of devices inventoried by OpenManage Enterprise
+version_added: "2.0.0"
+description:
+ - This module retrieves the list of devices in the inventory of OpenManage Enterprise along with the details of each device.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ fact_subset:
+ description:
+ - C(basic_inventory) returns the list of the devices.
+ - C(detailed_inventory) returns the inventory details of specified devices.
+ - C(subsystem_health) returns the health status of specified devices.
+ type: str
+ choices: [basic_inventory, detailed_inventory, subsystem_health ]
+ default: basic_inventory
+ system_query_options:
+ description:
+ - I(system_query_options) applicable for the choices of the fact_subset. Either I(device_id) or I(device_service_tag)
+ is mandatory for C(detailed_inventory) and C(subsystem_health) or both can be applicable.
+ type: dict
+ suboptions:
+ device_id:
+ description:
+ - A list of unique identifier is applicable
+ for C(detailed_inventory) and C(subsystem_health).
+ type: list
+ elements: int
+ device_service_tag:
+ description:
+ - A list of service tags are applicable for C(detailed_inventory)
+ and C(subsystem_health).
+ type: list
+ elements: str
+ inventory_type:
+ description:
+ - For C(detailed_inventory), it returns details of the specified inventory type.
+ type: str
+ filter:
+ description:
+ - For C(basic_inventory), it filters the collection of devices.
+ I(filter) query format should be aligned with OData standards.
+ type: str
+
+requirements:
+ - "python >= 3.8.6"
+author: "Sajna Shetty(@Sajna-Shetty)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = """
+---
+- name: Retrieve basic inventory of all devices
+ dellemc.openmanage.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Retrieve basic inventory for devices identified by IDs 33333 or 11111 using filtering
+ dellemc.openmanage.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fact_subset: "basic_inventory"
+ system_query_options:
+ filter: "Id eq 33333 or Id eq 11111"
+
+- name: Retrieve inventory details of specified devices identified by IDs 11111 and 22222
+ dellemc.openmanage.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_id:
+ - 11111
+ - 22222
+
+- name: Retrieve inventory details of specified devices identified by service tags MXL1234 and MXL4567
+ dellemc.openmanage.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+
+- name: Retrieve details of specified inventory type of specified devices identified by ID and service tags
+ dellemc.openmanage.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fact_subset: "detailed_inventory"
+ system_query_options:
+ device_id:
+ - 11111
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+ inventory_type: "serverDeviceCards"
+
+- name: Retrieve subsystem health of specified devices identified by service tags
+ dellemc.openmanage.ome_device_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fact_subset: "subsystem_health"
+ system_query_options:
+ device_service_tag:
+ - MXL1234
+ - MXL4567
+
+"""
+
+RETURN = '''
+---
+msg:
+ type: str
+ description: Over all device information status.
+ returned: on error
+ sample: "Failed to fetch the device information"
+device_info:
+ type: dict
+ description: Returns the information collected from the Device.
+ returned: success
+ sample: {
+ "value": [
+ {
+ "Actions": null,
+ "AssetTag": null,
+ "ChassisServiceTag": null,
+ "ConnectionState": true,
+ "DeviceManagement": [
+ {
+ "DnsName": "dnsname.host.com",
+ "InstrumentationName": "MX-12345",
+ "MacAddress": "11:10:11:10:11:10",
+ "ManagementId": 12345,
+ "ManagementProfile": [
+ {
+ "HasCreds": 0,
+ "ManagementId": 12345,
+ "ManagementProfileId": 12345,
+ "ManagementURL": "https://192.168.0.1:443",
+ "Status": 1000,
+ "StatusDateTime": "2019-01-21 06:30:08.501"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": "192.168.0.1"
+ }
+ ],
+ "DeviceName": "MX-0003I",
+ "DeviceServiceTag": "MXL1234",
+ "DeviceSubscription": null,
+ "LastInventoryTime": "2019-01-21 06:30:08.501",
+ "LastStatusTime": "2019-01-21 06:30:02.492",
+ "ManagedState": 3000,
+ "Model": "PowerEdge MX7000",
+ "PowerState": 17,
+ "SlotConfiguration": {},
+ "Status": 4000,
+ "SystemId": 2031,
+ "Type": 2000
+ }
+ ]
+ }
+'''
+
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+DEVICES_INVENTORY_DETAILS = "detailed_inventory"
+DEVICES_SUBSYSTEM_HEALTH = "subsystem_health"
+DEVICES_INVENTORY_TYPE = "inventory_type"
+DEVICE_LIST = "basic_inventory"
+DESC_HTTP_ERROR = "HTTP Error 404: Not Found"
+device_fact_error_report = {}
+
+DEVICE_RESOURCE_COLLECTION = {
+ DEVICE_LIST: {"resource": "DeviceService/Devices"},
+ DEVICES_INVENTORY_DETAILS: {"resource": "DeviceService/Devices({Id})/InventoryDetails"},
+ DEVICES_INVENTORY_TYPE: {"resource": "DeviceService/Devices({Id})/InventoryDetails('{InventoryType}')"},
+ DEVICES_SUBSYSTEM_HEALTH: {"resource": "DeviceService/Devices({Id})/SubSystemHealth"},
+}
+
+
+def update_device_details_with_filtering(missing_service_tags, service_tag_dict, rest_obj):
+    """
+    This is a workaround solution.
+    Use a filtering query in case fetching the full report list fails for some reason.
+    Updates service_tag_dict (and shrinks missing_service_tags) on each successful lookup.
+    :param missing_service_tags: Service tags which are unable to fetch from pagination request.
+    :param service_tag_dict: this contains device id mapping with tags; mutated in place.
+    :param rest_obj: ome connection object
+    :return: None.
+    """
+    # Iterate over a snapshot of the list: the previous code removed items
+    # from ``missing_service_tags`` while iterating it directly, which skips
+    # the element immediately following every removal. The no-op
+    # try/except-reraise wrapper has been dropped; errors still propagate.
+    for tag in list(missing_service_tags):
+        query = "DeviceServiceTag eq '{0}'".format(tag)
+        query_param = {"$filter": query}
+        resp = rest_obj.invoke_request('GET', DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"], query_param=query_param)
+        value = resp.json_data["value"]
+        if value and value[0]["DeviceServiceTag"] == tag:
+            service_tag_dict.update({value[0]["Id"]: value[0]["DeviceServiceTag"]})
+            missing_service_tags.remove(tag)
+
+
+def _get_device_id_from_service_tags(service_tags, rest_obj):
+    """
+    Get device ids from device service tag
+    Returns :dict : device_id to service_tag map
+    :arg service_tags: service tag
+    :arg rest_obj: RestOME class object in case of request with session.
+    :returns: dict eg: {1345:"MXL1245"}
+    """
+    device_list = rest_obj.get_all_report_details(DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"])["report_list"]
+    service_tag_dict = {}
+    for item in device_list:
+        if item["DeviceServiceTag"] in service_tags:
+            service_tag_dict.update({item["Id"]: item["DeviceServiceTag"]})
+    available_service_tags = service_tag_dict.values()
+    missing_service_tags = list(set(service_tags) - set(available_service_tags))
+    # Fallback: try per-tag filtered queries for tags the report list missed.
+    update_device_details_with_filtering(missing_service_tags, service_tag_dict, rest_obj)
+    # Tags that are still unresolved are recorded in the module-level error
+    # report and surface as "HTTP Error 404" entries in the result.
+    device_fact_error_report.update(dict((tag, DESC_HTTP_ERROR) for tag in missing_service_tags))
+    return service_tag_dict
+
+
+def is_int(val):
+    """Return True when *val* can be parsed as an int, False otherwise."""
+    try:
+        int(val)
+        return True
+    except ValueError:
+        return False
+
+
+def _check_duplicate_device_id(device_id_list, service_tag_dict):
+    """If a service tag resolves to a device_id that was also requested
+    directly, drop it from service_tag_dict and record a duplicate report.
+    :arg1: device_id_list : list of device_id
+    :arg2: service_tag_dict: dictionary of device_id to service tag map"""
+    if device_id_list:
+        device_id_represents_int = [int(device_id) for device_id in device_id_list if device_id and is_int(device_id)]
+        # Device IDs requested both directly and via a service tag.
+        common_val = list(set(device_id_represents_int) & set(service_tag_dict.keys()))
+        for device_id in common_val:
+            device_fact_error_report.update(
+                {service_tag_dict[device_id]: "Duplicate report of device_id: {0}".format(device_id)})
+            del service_tag_dict[device_id]
+
+
+def _get_device_identifier_map(module_params, rest_obj):
+    """
+    Builds the identifiers mapping from system_query_options.
+    :returns: the dict of device_id to service_tag map
+    eg: {"device_id":{1234: None},"device_service_tag":{1345:"MXL1234"}}"""
+    system_query_options_param = module_params.get("system_query_options")
+    device_id_service_tag_dict = {}
+    if system_query_options_param is not None:
+        device_id_list = system_query_options_param.get("device_id")
+        device_service_tag_list = system_query_options_param.get("device_service_tag")
+        if device_id_list:
+            # Direct IDs have no known service tag; map them to None.
+            device_id_dict = dict((device_id, None) for device_id in list(set(device_id_list)))
+            device_id_service_tag_dict["device_id"] = device_id_dict
+        if device_service_tag_list:
+            service_tag_dict = _get_device_id_from_service_tags(device_service_tag_list,
+                                                                rest_obj)
+
+            # Avoid reporting the same device twice when id and tag overlap.
+            _check_duplicate_device_id(device_id_list, service_tag_dict)
+            device_id_service_tag_dict["device_service_tag"] = service_tag_dict
+    return device_id_service_tag_dict
+
+
+def _get_query_parameters(module_params):
+    """
+    Builds query parameter
+    :returns: dictionary in the OData query format, or None when no filter
+    was supplied
+    eg : {"$filter":"Type eq 2000"}
+    """
+    system_query_options_param = module_params.get("system_query_options")
+    query_parameter = None
+    if system_query_options_param:
+        filter_by_val = system_query_options_param.get("filter")
+        if filter_by_val:
+            query_parameter = {"$filter": filter_by_val}
+    return query_parameter
+
+
+def _get_resource_parameters(module_params, rest_obj):
+    """
+    Identifies the resource path by different states
+    :returns: dictionary containing identifier with respective resource path
+    eg:{"device_id":{1234:"DeviceService/Devices(1234)/InventoryDetails"},
+    "device_service_tag":{"MXL1234":"DeviceService/Devices(1345)/InventoryDetails"}}
+    """
+    fact_subset = module_params["fact_subset"]
+    path_dict = {}
+    if fact_subset != DEVICE_LIST:
+        inventory_type = None
+        device_id_service_tag_dict = _get_device_identifier_map(module_params, rest_obj)
+        if fact_subset == DEVICES_INVENTORY_DETAILS:
+            system_query_options = module_params.get("system_query_options")
+            inventory_type = system_query_options.get(DEVICES_INVENTORY_TYPE)
+        # A specific inventory_type narrows the resource to one inventory view.
+        path_identifier = DEVICES_INVENTORY_TYPE if inventory_type else fact_subset
+        for identifier_type, identifier_dict in device_id_service_tag_dict.items():
+            path_dict[identifier_type] = {}
+            for device_id, service_tag in identifier_dict.items():
+                # Result keys are service tags where known, device IDs otherwise.
+                key_identifier = service_tag if identifier_type == "device_service_tag" else device_id
+                path = DEVICE_RESOURCE_COLLECTION[path_identifier]["resource"].format(Id=device_id,
+                                                                                      InventoryType=inventory_type)
+                path_dict[identifier_type].update({key_identifier: path})
+    else:
+        path_dict.update({DEVICE_LIST: DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"]})
+    return path_dict
+
+
+def _check_mutually_inclusive_arguments(val, module_params, required_args):
+    """
+    Raises ValueError if fact_subset detailed_inventory/subsystem_health is
+    requested without at least one qualifier (device_id or device_service_tag)."""
+    system_query_options_param = module_params.get("system_query_options")
+    if system_query_options_param is None or (system_query_options_param is not None and not any(
+            system_query_options_param.get(qualifier) for qualifier in required_args)):
+        raise ValueError("One of the following {0} is required for {1}".format(required_args, val))
+
+
+def _validate_inputs(module_params):
+    """Validate input parameters; raises ValueError when a non-basic
+    fact_subset lacks a device_id/device_service_tag qualifier."""
+    fact_subset = module_params["fact_subset"]
+    if fact_subset != "basic_inventory":
+        _check_mutually_inclusive_arguments(fact_subset, module_params, ["device_id", "device_service_tag"])
+
+
+def main():
+    """Module entry point: fetch basic inventory, detailed inventory or
+    subsystem health for devices inventoried by OpenManage Enterprise."""
+    system_query_options = {"type": 'dict', "required": False, "options": {
+        "device_id": {"type": 'list', "elements": 'int'},
+        "device_service_tag": {"type": 'list', "elements": 'str'},
+        "inventory_type": {"type": 'str'},
+        "filter": {"type": 'str', "required": False},
+    }}
+
+    specs = {
+        "fact_subset": {"required": False, "default": "basic_inventory",
+                        "choices": ['basic_inventory', 'detailed_inventory', 'subsystem_health']},
+        "system_query_options": system_query_options,
+    }
+    specs.update(ome_auth_params)
+    module = AnsibleModule(
+        argument_spec=specs,
+        required_if=[['fact_subset', 'detailed_inventory', ['system_query_options']],
+                     ['fact_subset', 'subsystem_health', ['system_query_options']]],
+        supports_check_mode=True)
+
+    try:
+        _validate_inputs(module.params)
+        with RestOME(module.params, req_session=True) as rest_obj:
+            device_facts = _get_resource_parameters(module.params, rest_obj)
+            resp_status = []
+            if device_facts.get("basic_inventory"):
+                query_param = _get_query_parameters(module.params)
+                if query_param is not None:
+                    # Filtered listing: single request with the OData filter.
+                    resp = rest_obj.invoke_request('GET', device_facts["basic_inventory"], query_param=query_param)
+                    device_facts = resp.json_data
+                    resp_status.append(resp.status_code)
+                else:
+                    # Unfiltered listing: paginate through the full report.
+                    device_report = rest_obj.get_all_report_details(DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"])
+                    device_facts = {"@odata.context": device_report["resp_obj"].json_data["@odata.context"],
+                                    "@odata.count": len(device_report["report_list"]),
+                                    "value": device_report["report_list"]}
+                    resp_status.append(device_report["resp_obj"].status_code)
+                if device_facts["@odata.count"] == 0:
+                    module.exit_json(msg="No devices present.", device_info=[])
+            else:
+                # detailed_inventory / subsystem_health: one GET per resolved
+                # device path; per-device HTTP errors become string entries.
+                for identifier_type, path_dict_map in device_facts.items():
+                    for identifier, path in path_dict_map.items():
+                        try:
+                            resp = rest_obj.invoke_request('GET', path)
+                            data = resp.json_data
+                            resp_status.append(resp.status_code)
+                        except HTTPError as err:
+                            data = str(err)
+                        path_dict_map[identifier] = data
+                if any(device_fact_error_report):
+                    # Merge per-tag errors (missing tags, duplicates) into result.
+                    if "device_service_tag" in device_facts:
+                        device_facts["device_service_tag"].update(device_fact_error_report)
+                    else:
+                        device_facts["device_service_tag"] = device_fact_error_report
+            if 200 in resp_status:
+                module.exit_json(device_info=device_facts)
+            else:
+                module.exit_json(msg="Unable to fetch the device information because the requested device id(s) or "
+                                     "device service tag(s) does not exist.",
+                                 device_info=[])
+
+    except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError, SSLError, OSError) as err:
+        module.fail_json(msg=str(err))
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py
new file mode 100644
index 000000000..9b48e33dd
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py
@@ -0,0 +1,481 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_device_local_access_configuration
+short_description: Configure local access settings on OpenManage Enterprise Modular.
+description: This module allows to configure the local access settings of the power button, quick sync, KVM,
+ LCD, and chassis direct access on OpenManage Enterprise Modular.
+version_added: "4.4.0"
+extends_documentation_fragment:
+ - dellemc.openmanage.omem_auth_options
+options:
+ device_id:
+ type: int
+ description:
+ - The ID of the chassis for which the local access configuration to be updated.
+ - If the device ID is not specified, this module updates the local access settings for the I(hostname).
+ - I(device_id) is mutually exclusive with I(device_service_tag).
+ device_service_tag:
+ type: str
+ description:
+ - The service tag of the chassis for which the local access settings needs to be updated.
+ - If the device service tag is not specified, this module updates the local access settings for the I(hostname).
+ - I(device_service_tag) is mutually exclusive with I(device_id).
+ enable_kvm_access:
+ type: bool
+ description: Enables or disables the keyboard, video, and mouse (KVM) interfaces.
+ enable_chassis_direct_access:
+ type: bool
+ description: Enables or disables the access to management consoles such as iDRAC and the management module of
+ the device on the chassis.
+ chassis_power_button:
+ type: dict
+ description: The settings for the chassis power button.
+ suboptions:
+ enable_chassis_power_button:
+ required: true
+ type: bool
+ description:
+ - Enables or disables the chassis power button.
+ - If C(False), the chassis cannot be turned on or turned off using the power button.
+ enable_lcd_override_pin:
+ type: bool
+ description:
+ - Enables or disables the LCD override pin.
+ - This is required when I(enable_chassis_power_button) is C(False).
+ disabled_button_lcd_override_pin:
+ type: int
+ description:
+ - The six digit LCD override pin to change the power state of the chassis.
+ - This is required when I(enable_lcd_override_pin) is C(True).
+ - The module will always report change when I(disabled_button_lcd_override_pin) is provided.
+ quick_sync:
+ type: dict
+ description:
+ - The settings for quick sync.
+ - The I(quick_sync) options are ignored if the quick sync hardware is not present.
+ suboptions:
+ quick_sync_access:
+ type: str
+ choices: [READ_WRITE, READ_ONLY, DISABLED]
+ description:
+ - Users with administrator privileges can set the following types of I(quick_sync_access).
+ - C(READ_WRITE) enables writing configuration using quick sync.
+ - C(READ_ONLY) enables read only access to Wi-Fi and Bluetooth Low Energy(BLE).
+ - C(DISABLED) disables reading or writing configuration through quick sync.
+ enable_inactivity_timeout:
+ type: bool
+ description: Enables or disables the inactivity timeout.
+ timeout_limit:
+ type: int
+ description:
+ - Inactivity timeout in seconds or minutes.
+ - The range is 120 to 3600 in seconds, or 2 to 60 in minutes.
+ - This option is required when I(enable_inactivity_timeout) is C(True).
+ timeout_limit_unit:
+ type: str
+ choices: [SECONDS, MINUTES]
+ description:
+ - Inactivity timeout limit unit.
+ - C(SECONDS) to set I(timeout_limit) in seconds.
+ - C(MINUTES) to set I(timeout_limit) in minutes.
+ - This option is required when I(enable_inactivity_timeout) is C(True).
+ enable_read_authentication:
+ type: bool
+ description: Enables or disables the option to log in using your user credentials and to read the
+ inventory in a secure data center.
+ enable_quick_sync_wifi:
+ type: bool
+ description: Enables or disables the Wi-Fi communication path to the chassis.
+ lcd:
+ type: dict
+ description:
+ - The settings for LCD.
+ - The I(lcd) options are ignored if the LCD hardware is not present in the chassis.
+ suboptions:
+ lcd_access:
+ type: str
+ choices: [VIEW_AND_MODIFY, VIEW_ONLY, DISABLED]
+ description:
+ - Option to configure the quick sync settings using LCD.
+ - C(VIEW_AND_MODIFY) to set access level to view and modify.
+ - C(VIEW_ONLY) to set access level to view.
+ - C(DISABLED) to disable the access.
+ user_defined:
+ type: str
+ description: The text to display on the LCD Home screen. The LCD Home screen is displayed when the system
+ is reset to factory default settings. The user-defined text can have a maximum of 62 characters.
+ lcd_language:
+ type: str
+ description:
+ - The language code in which the text on the LCD must be displayed.
+ - en to set English language.
+ - fr to set French language.
+ - de to set German language.
+ - es to set Spanish language.
+ - ja to set Japanese language.
+ - zh to set Chinese language.
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+ - The module will always report change when I(enable_chassis_power_button) is C(True).
+"""
+
+EXAMPLES = """
+---
+- name: Configure KVM, direct access and power button settings of the chassis using device ID.
+ dellemc.openmanage.ome_device_local_access_configuration:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 25011
+ enable_kvm_access: true
+ enable_chassis_direct_access: false
+ chassis_power_button:
+ enable_chassis_power_button: false
+ enable_lcd_override_pin: true
+ disabled_button_lcd_override_pin: 123456
+
+- name: Configure Quick sync and LCD settings of the chassis using device service tag.
+ dellemc.openmanage.ome_device_local_access_configuration:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: GHRT2RL
+ quick_sync:
+ quick_sync_access: READ_ONLY
+ enable_read_authentication: true
+ enable_quick_sync_wifi: true
+ enable_inactivity_timeout: true
+ timeout_limit: 10
+ timeout_limit_unit: MINUTES
+ lcd:
+ lcd_access: VIEW_ONLY
+ lcd_language: en
+ user_defined: "LCD Text"
+
+- name: Configure all local access settings of the host chassis.
+ dellemc.openmanage.ome_device_local_access_configuration:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_kvm_access: true
+ enable_chassis_direct_access: false
+ chassis_power_button:
+ enable_chassis_power_button: false
+ enable_lcd_override_pin: true
+ disabled_button_lcd_override_pin: 123456
+ quick_sync:
+ quick_sync_access: READ_WRITE
+ enable_read_authentication: true
+ enable_quick_sync_wifi: true
+ enable_inactivity_timeout: true
+ timeout_limit: 120
+ timeout_limit_unit: SECONDS
+ lcd:
+ lcd_access: VIEW_AND_MODIFY
+ lcd_language: en
+ user_defined: "LCD Text"
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall status of the device local access settings.
+ returned: always
+ sample: "Successfully updated the local access settings."
+local_access_settings:
+ type: dict
+ description: Returned when the local access settings are updated successfully.
+ returned: success
+ sample: {
+ "SettingType": "LocalAccessConfiguration",
+ "EnableChassisDirect": false,
+ "EnableChassisPowerButton": false,
+ "EnableKvmAccess": true,
+ "EnableLcdOverridePin": false,
+ "LcdAccess": "VIEW_ONLY",
+ "LcdCustomString": "LCD Text",
+ "LcdLanguage": "en",
+ "LcdOverridePin": "",
+ "LcdPinLength": null,
+ "LcdPresence": "Present",
+ "LedPresence": null,
+ "QuickSync": {
+ "EnableInactivityTimeout": true,
+ "EnableQuickSyncWifi": false,
+ "EnableReadAuthentication": false,
+ "QuickSyncAccess": "READ_ONLY",
+ "QuickSyncHardware": "Present",
+ "TimeoutLimit": 7,
+ "TimeoutLimitUnit": "MINUTES"
+ }
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+"""
+
+
+import json
+import socket
+import copy
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+
+DOMAIN_URI = "ManagementDomainService/Domains"
+DEVICE_URI = "DeviceService/Devices"
+LAC_API = "DeviceService/Devices({0})/Settings('LocalAccessConfiguration')"
+CONFIG_FAIL_MSG = "one of the following is required: enable_kvm_access, enable_chassis_direct_access, " \
+ "chassis_power_button, quick_sync, lcd"
+DOMAIN_FAIL_MSG = "The operation to configure the local access is supported only on " \
+ "OpenManage Enterprise Modular."
+FETCH_FAIL_MSG = "Unable to retrieve the device information."
+DEVICE_FAIL_MSG = "Unable to complete the operation because the entered target device {0} '{1}' is invalid."
+LAC_FAIL_MSG = "Unable to complete the operation because the local access configuration settings " \
+ "are not supported on the specified device."
+CHANGES_FOUND = "Changes found to be applied."
+NO_CHANGES_FOUND = "No changes found to be applied."
+SUCCESS_MSG = "Successfully updated the local access settings."
+
+
def get_ip_from_host(hostname):
    """Best-effort translation of *hostname* into an IP address.

    Resolves via ``socket.getaddrinfo`` and returns the address of the last
    record; on any resolution failure, or when the resolved address is empty,
    the original hostname is returned unchanged.
    """
    try:
        records = socket.getaddrinfo(hostname, None)
        resolved = records[-1][-1][0]
    except Exception:
        # Deliberate best-effort: any failure falls back to the raw hostname.
        return hostname
    return resolved if resolved else hostname
+
+
def get_chassis_device(module, rest_obj):
    """Find the chassis device the module's I(hostname) points at.

    Matches the resolved IP address of I(hostname) against the public
    addresses of every record in the domain inventory.

    :param module: AnsibleModule instance carrying the playbook parameters.
    :param rest_obj: OME REST session object used for the API call.
    :return: tuple ``("Id", device_id)`` of the matching chassis.
    """
    ipaddress = get_ip_from_host(module.params["hostname"])
    resp = rest_obj.invoke_request("GET", DOMAIN_URI)
    for data in resp.json_data["value"]:
        if ipaddress in data["PublicAddress"]:
            return "Id", data["DeviceId"]
    # Fail only after every domain record has been inspected (the original
    # loop/else layout could abort on the first non-matching record), and
    # also when the domain list is empty.
    module.fail_json(msg=FETCH_FAIL_MSG)
+
+
def check_domain_service(module, rest_obj):
    """Fail early when the target appliance is not an OME-Modular chassis.

    Probes the domain inventory endpoint with a short timeout; an HTTP error
    carrying MessageId CGEN1006 identifies a non-Modular appliance.
    """
    try:
        rest_obj.invoke_request("GET", DOMAIN_URI, api_timeout=5)
    except HTTPError as err:
        details = json.load(err)
        message_id = details["error"]["@Message.ExtendedInfo"][0]["MessageId"]
        if message_id == "CGEN1006":
            module.fail_json(msg=DOMAIN_FAIL_MSG)
+
+
def check_mode_validation(module, loc_resp):
    """Diff the requested local access settings against the current ones.

    Exits the module early when running in check mode or when the merged
    request would not change anything; otherwise returns the complete PUT
    payload built from the current settings overlaid with the request.

    :param module: AnsibleModule instance carrying the playbook parameters.
    :param loc_resp: current ``LocalAccessConfiguration`` settings from the API.
    :return: dict payload for the LocalAccessConfiguration PUT request.
    """
    # Snapshot of the general settings currently active on the chassis.
    exist_config = {
        "EnableKvmAccess": loc_resp["EnableKvmAccess"], "EnableChassisDirect": loc_resp["EnableChassisDirect"],
        "EnableChassisPowerButton": loc_resp["EnableChassisPowerButton"],
        "EnableLcdOverridePin": loc_resp["EnableLcdOverridePin"], "LcdAccess": loc_resp["LcdAccess"],
        "LcdCustomString": loc_resp["LcdCustomString"], "LcdLanguage": loc_resp["LcdLanguage"]}
    quick_sync = loc_resp["QuickSync"]
    # Snapshot of the quick sync settings currently active on the chassis.
    exist_quick_config = {
        "QuickSyncAccess": quick_sync["QuickSyncAccess"], "TimeoutLimit": quick_sync["TimeoutLimit"],
        "EnableInactivityTimeout": quick_sync["EnableInactivityTimeout"],
        "TimeoutLimitUnit": quick_sync["TimeoutLimitUnit"],
        "EnableReadAuthentication": quick_sync["EnableReadAuthentication"],
        "EnableQuickSyncWifi": quick_sync["EnableQuickSyncWifi"]}
    req_config, req_quick_config, payload = {}, {}, {}
    lcd_options, chassis_power = module.params.get("lcd"), module.params.get("chassis_power_button")
    # LCD options are honoured only when the LCD hardware is present.
    if loc_resp["LcdPresence"] == "Present" and lcd_options is not None:
        req_config["LcdCustomString"] = lcd_options.get("user_defined")
        req_config["LcdAccess"] = lcd_options.get("lcd_access")
        req_config["LcdLanguage"] = lcd_options.get("lcd_language")
    req_config["EnableKvmAccess"] = module.params.get("enable_kvm_access")
    req_config["EnableChassisDirect"] = module.params.get("enable_chassis_direct_access")
    if chassis_power is not None:
        power_button = chassis_power["enable_chassis_power_button"]
        if power_button is False:
            # The LCD override PIN only applies when the power button is off.
            chassis_pin = chassis_power.get("enable_lcd_override_pin")
            if chassis_pin is True:
                exist_config["LcdOverridePin"] = loc_resp["LcdOverridePin"]
                req_config["LcdOverridePin"] = chassis_power["disabled_button_lcd_override_pin"]
            req_config["EnableLcdOverridePin"] = chassis_pin
        req_config["EnableChassisPowerButton"] = power_button
    q_sync = module.params.get("quick_sync")
    # Quick sync options are honoured only when the hardware is present.
    if q_sync is not None and loc_resp["QuickSync"]["QuickSyncHardware"] == "Present":
        req_quick_config["QuickSyncAccess"] = q_sync.get("quick_sync_access")
        req_quick_config["EnableReadAuthentication"] = q_sync.get("enable_read_authentication")
        req_quick_config["EnableQuickSyncWifi"] = q_sync.get("enable_quick_sync_wifi")
        if q_sync.get("enable_inactivity_timeout") is True:
            time_limit, time_unit = q_sync.get("timeout_limit"), q_sync.get("timeout_limit_unit")
            # The API works in seconds; normalise a minute-based limit.
            if q_sync.get("timeout_limit_unit") == "MINUTES":
                time_limit, time_unit = time_limit * 60, "SECONDS"
            req_quick_config["TimeoutLimit"] = time_limit
            req_quick_config["TimeoutLimitUnit"] = time_unit
        req_quick_config["EnableInactivityTimeout"] = q_sync.get("enable_inactivity_timeout")
    # Drop options the playbook did not supply so they keep current values.
    req_config = dict([(k, v) for k, v in req_config.items() if v is not None])
    req_quick_config = dict([(k, v) for k, v in req_quick_config.items() if v is not None])
    # Overlay the request onto copies of the current state and diff the two.
    cloned_req_config = copy.deepcopy(exist_config)
    cloned_req_config.update(req_config)
    cloned_req_quick_config = copy.deepcopy(exist_quick_config)
    cloned_req_quick_config.update(req_quick_config)
    diff_changes = [bool(set(exist_config.items()) ^ set(cloned_req_config.items())) or
                    bool(set(exist_quick_config.items()) ^ set(cloned_req_quick_config.items()))]
    if module.check_mode and any(diff_changes) is True:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    elif (module.check_mode and all(diff_changes) is False) or \
            (not module.check_mode and all(diff_changes) is False):
        module.exit_json(msg=NO_CHANGES_FOUND)
    # Build the full PUT payload: merged settings plus fixed metadata fields.
    payload.update(cloned_req_config)
    payload["QuickSync"] = cloned_req_quick_config
    payload["QuickSync"]["QuickSyncHardware"] = loc_resp["QuickSync"]["QuickSyncHardware"]
    payload["SettingType"] = "LocalAccessConfiguration"
    payload["LcdPresence"] = loc_resp["LcdPresence"]
    return payload
+
+
def get_device_details(rest_obj, module):
    """Resolve the target chassis and apply the local access configuration.

    :param rest_obj: OME REST session object used for all API calls.
    :param module: AnsibleModule instance carrying the playbook parameters.
    :return: response object of the PUT request that applied the settings.
    """
    device_id, tag = module.params.get("device_id"), module.params.get("device_service_tag")
    if device_id is None and tag is None:
        # Neither identifier supplied: target the chassis that the module is
        # connected to, resolved through the domain inventory.
        key, value = get_chassis_device(module, rest_obj)
        device_id = value
    else:
        key, value = ("Id", device_id) if device_id is not None else ("DeviceServiceTag", tag)
    param_value = "{0} eq {1}".format(key, value) if key == "Id" else "{0} eq '{1}'".format(key, value)
    resp = rest_obj.invoke_request("GET", DEVICE_URI, query_param={"$filter": param_value})
    resp_data = resp.json_data.get("value")
    rename_key = "id" if key == "Id" else "service tag"
    if not resp_data:
        module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value))
    # $filter may match loosely; confirm the first record is the exact device.
    if key == "DeviceServiceTag" and resp_data[0]["DeviceServiceTag"] == tag:
        device_id = resp_data[0]["Id"]
    elif key == "Id" and resp_data[0]["Id"] == device_id:
        device_id = resp_data[0]["Id"]
    else:
        module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value))
    try:
        loc_resp = rest_obj.invoke_request("GET", LAC_API.format(device_id))
    except HTTPError as err:
        err_message = json.load(err)
        error_msg = err_message.get('error', {}).get('@Message.ExtendedInfo')
        if error_msg and error_msg[0].get("MessageId") == "CGEN1004":
            module.fail_json(msg=LAC_FAIL_MSG)
        # Fix: any other HTTP error was previously swallowed, leaving
        # final_resp unbound; re-raise so the caller reports the API failure.
        raise
    payload = check_mode_validation(module, loc_resp.json_data)
    final_resp = rest_obj.invoke_request("PUT", LAC_API.format(device_id), data=payload)
    return final_resp
+
+
def main():
    """Entry point for the ome_device_local_access_configuration module."""
    # Suboption spec for the chassis power button dict parameter.
    chassis_power = {
        "enable_chassis_power_button": {"type": "bool", "required": True},
        "enable_lcd_override_pin": {"type": "bool", "required": False},
        "disabled_button_lcd_override_pin": {"type": "int", "required": False, "no_log": True}}
    # Suboption spec for the quick sync dict parameter.
    quick_sync_options = {
        "quick_sync_access": {"type": "str", "required": False, "choices": ["DISABLED", "READ_ONLY", "READ_WRITE"]},
        "enable_inactivity_timeout": {"type": "bool", "required": False},
        "timeout_limit": {"type": "int", "required": False},
        "timeout_limit_unit": {"type": "str", "required": False, "choices": ["SECONDS", "MINUTES"]},
        "enable_read_authentication": {"type": "bool", "required": False},
        "enable_quick_sync_wifi": {"type": "bool", "required": False}}
    # Suboption spec for the LCD dict parameter.
    lcd_options = {
        "lcd_access": {"type": "str", "required": False, "choices": ["VIEW_AND_MODIFY", "VIEW_ONLY", "DISABLED"]},
        "user_defined": {"type": "str", "required": False},
        "lcd_language": {"type": "str", "required": False}}
    specs = {
        "device_id": {"required": False, "type": "int"},
        "device_service_tag": {"required": False, "type": "str"},
        "enable_kvm_access": {"required": False, "type": "bool"},
        "enable_chassis_direct_access": {"required": False, "type": "bool"},
        "chassis_power_button": {
            "required": False, "type": "dict", "options": chassis_power,
            "required_if": [["enable_lcd_override_pin", True, ("disabled_button_lcd_override_pin",)]],
        },
        "quick_sync": {
            "required": False, "type": "dict", "options": quick_sync_options,
            "required_if": [["enable_inactivity_timeout", True, ("timeout_limit", "timeout_limit_unit")]]
        },
        "lcd": {
            "required": False, "type": "dict", "options": lcd_options,
        },
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        mutually_exclusive=[('device_id', 'device_service_tag')],
        required_one_of=[["enable_kvm_access", "enable_chassis_direct_access",
                          "chassis_power_button", "quick_sync", "lcd"]],
        supports_check_mode=True,
    )
    try:
        # NOTE(review): appears redundant with required_one_of above; kept as
        # a defensive re-check of the same five options.
        if not any([module.params.get("chassis_power_button"), module.params.get("quick_sync"),
                    module.params.get("lcd"), module.params.get("enable_kvm_access") is not None,
                    module.params.get("enable_chassis_direct_access") is not None]):
            module.fail_json(msg=CONFIG_FAIL_MSG)
        with RestOME(module.params, req_session=True) as rest_obj:
            check_domain_service(module, rest_obj)
            resp = get_device_details(rest_obj, module)
            resp_data = resp.json_data
            quick_sync = module.params.get("quick_sync")
            # The payload sends the timeout in seconds; convert the response
            # back to minutes when the user supplied the limit in minutes.
            if quick_sync is not None and quick_sync.get("enable_inactivity_timeout") is True and \
                    quick_sync.get("timeout_limit_unit") == "MINUTES":
                resp_data["QuickSync"]["TimeoutLimit"] = int(resp_data["QuickSync"]["TimeoutLimit"] / 60)
                resp_data["QuickSync"]["TimeoutLimitUnit"] = "MINUTES"
            module.exit_json(msg=SUCCESS_MSG, local_access_settings=resp_data, changed=True)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # An unreachable appliance is reported as unreachable, not failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
        module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py
new file mode 100644
index 000000000..96a61a29b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_device_location
+short_description: Configure device location settings on OpenManage Enterprise Modular
+description: This module allows to configure the device location settings of the chassis
+ on OpenManage Enterprise Modular.
+version_added: "4.2.0"
+extends_documentation_fragment:
+ - dellemc.openmanage.omem_auth_options
+options:
+ device_id:
+ type: int
+ description:
+ - The ID of the chassis for which the settings need to be updated.
+ - If the device ID is not specified, this module updates
+ the location settings for the I(hostname).
+ - I(device_id) is mutually exclusive with I(device_service_tag).
+ device_service_tag:
+ type: str
+ description:
+ - The service tag of the chassis for which the settings need to be updated.
+ - If the device service tag is not specified, this module updates
+ the location settings for the I(hostname).
+ - I(device_service_tag) is mutually exclusive with I(device_id).
+ data_center:
+ type: str
+ description: The data center name of the chassis.
+ room:
+ type: str
+ description: The room of the chassis.
+ aisle:
+ type: str
+ description: The aisle of the chassis.
+ rack:
+ type: str
+ description: The rack name of the chassis.
+ rack_slot:
+ type: int
+ description: The rack slot number of the chassis.
+ location:
+ type: str
+ description: The physical location of the chassis.
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Update device location settings of a chassis using the device ID.
+ dellemc.openmanage.ome_device_location:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 25011
+ data_center: data center 1
+ room: room 1
+ aisle: aisle 1
+ rack: rack 1
+ rack_slot: 2
+ location: location 1
+
+- name: Update device location settings of a chassis using the device service tag.
+ dellemc.openmanage.ome_device_location:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: GHRT2RL
+ data_center: data center 2
+ room: room 7
+ aisle: aisle 4
+ rack: rack 6
+ rack_slot: 22
+ location: location 5
+
+- name: Update device location settings of the host chassis.
+ dellemc.openmanage.ome_device_location:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ data_center: data center 3
+ room: room 3
+ aisle: aisle 1
+ rack: rack 7
+ rack_slot: 10
+ location: location 9
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall status of the device location settings.
+ returned: always
+ sample: "Successfully updated the location settings."
+location_details:
+ type: dict
+ description: returned when location settings are updated successfully.
+ returned: success
+ sample: {
+ "Aisle": "aisle 1",
+ "DataCenter": "data center 1",
+ "Location": "location 1",
+ "RackName": "rack 1",
+ "RackSlot": 2,
+ "Room": "room 1",
+ "SettingType": "Location"
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+"""
+
+
+import json
+import socket
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+
+LOCATION_API = "DeviceService/Devices({0})/Settings('Location')"
+DEVICE_URI = "DeviceService/Devices"
+DOMAIN_URI = "ManagementDomainService/Domains"
+DEVICE_FAIL_MSG = "Unable to complete the operation because the entered target device {0} '{1}' is invalid."
+
+
def check_domain_service(module, rest_obj):
    """Fail when the target appliance is not OpenManage Enterprise Modular.

    Probes the domain inventory endpoint with a short timeout; an HTTP error
    carrying MessageId CGEN1006 identifies an unsupported appliance.
    """
    try:
        rest_obj.invoke_request("GET", DOMAIN_URI, api_timeout=5)
    except HTTPError as err:
        err_message = json.load(err)
        # CGEN1006 is treated as "domain endpoint not available here".
        if err_message["error"]["@Message.ExtendedInfo"][0]["MessageId"] == "CGEN1006":
            module.fail_json(msg="The device location settings operation is supported only on "
                                 "OpenManage Enterprise Modular systems.")
    return
+
+
def validate_dictionary(module, loc_resp):
    """Build the Location payload, exiting early when nothing would change.

    Compares the requested location fields against the current settings from
    the appliance, honours check mode, and returns the PUT payload.
    """
    requested = {
        "DataCenter": module.params.get("data_center"),
        "Room": module.params.get("room"),
        "Aisle": module.params.get("aisle"),
        "RackName": module.params.get("rack"),
        "Location": module.params.get("location"),
    }
    supplied = {key: val for key, val in requested.items() if val is not None}
    # Only compare fields the playbook supplied; None values reported by the
    # appliance are treated as absent.
    current = {key: val for key, val in loc_resp.items()
               if key in supplied and val is not None}
    rack_slot = module.params.get("rack_slot")
    if rack_slot is not None:
        requested["RackSlot"] = rack_slot
        supplied["RackSlot"] = rack_slot
        current["RackSlot"] = loc_resp["RackSlot"]
    has_changes = set(supplied.items()) != set(current.items())
    if not has_changes:
        module.exit_json(msg="No changes found to be applied.")
    if module.check_mode:
        module.exit_json(msg="Changes found to be applied.", changed=True)
    # Start from the current values and overlay the requested ones.
    payload = {"SettingType": "Location"}
    payload.update({key: val for key, val in loc_resp.items() if key in requested})
    payload.update(supplied)
    if supplied.get("RackSlot") is None:
        payload["RackSlot"] = loc_resp.get("RackSlot")
    return payload
+
+
def get_ip_from_host(hostname):
    """Resolve *hostname* to an IP address, falling back to the input.

    Uses the last ``getaddrinfo`` record; every failure mode (resolver error,
    empty result, bad input) returns the hostname unchanged.
    """
    try:
        addr_records = socket.getaddrinfo(hostname, None)
        candidate = addr_records[-1][-1][0]
    except Exception:
        # Best-effort lookup: keep the caller-supplied value on any failure.
        return hostname
    return candidate or hostname
+
+
def standalone_chassis(module, rest_obj):
    """Find the device ID of the chassis the module is connected to.

    Matches the resolved IP address of I(hostname) against the public
    addresses of every record in the domain inventory.

    :param module: AnsibleModule instance carrying the playbook parameters.
    :param rest_obj: OME REST session object used for the API call.
    :return: tuple ``("Id", device_id)`` of the matching chassis.
    """
    ipaddress = get_ip_from_host(module.params["hostname"])
    resp = rest_obj.invoke_request("GET", DOMAIN_URI)
    for data in resp.json_data["value"]:
        if ipaddress in data["PublicAddress"]:
            return "Id", data["DeviceId"]
    # Fail only after every domain record has been inspected (the original
    # loop/else layout could abort on the first non-matching record), and
    # also when the domain list is empty.
    module.fail_json(msg="Failed to fetch the device information.")
+
+
def device_validation(module, rest_obj):
    """Resolve the target device and apply the location settings.

    :param module: AnsibleModule instance carrying the playbook parameters.
    :param rest_obj: OME REST session object used for all API calls.
    :return: response object of the PUT request that applied the settings.
    """
    device_id, tag = module.params.get("device_id"), module.params.get("device_service_tag")
    if device_id is None and tag is None:
        # No identifier given: target the chassis the module is connected to.
        key, value = standalone_chassis(module, rest_obj)
        device_id = value
    else:
        key, value = ("Id", device_id) if device_id is not None else ("DeviceServiceTag", tag)
    param_value = "{0} eq {1}".format(key, value) if key == "Id" else "{0} eq '{1}'".format(key, value)
    resp = rest_obj.invoke_request("GET", DEVICE_URI, query_param={"$filter": param_value})
    resp_data = resp.json_data.get("value")
    rename_key = "id" if key == "Id" else "service tag"
    if not resp_data:
        module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value))
    # $filter may match loosely; confirm the first record is the exact device.
    if key == "DeviceServiceTag" and resp_data[0]["DeviceServiceTag"] == tag:
        device_id = resp_data[0]["Id"]
    elif key == "Id" and resp_data[0]["Id"] == device_id:
        device_id = resp_data[0]["Id"]
    else:
        module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value))
    try:
        loc_resp = rest_obj.invoke_request("GET", LOCATION_API.format(device_id))
    except HTTPError as err:
        err_message = json.load(err)
        error_msg = err_message.get('error', {}).get('@Message.ExtendedInfo')
        if error_msg and error_msg[0].get("MessageId") == "CGEN1004":
            module.fail_json(msg="Unable to complete the operation because the location settings "
                                 "are not supported on the specified device.")
        # Fix: other HTTP errors were previously swallowed, making the
        # function return an empty dict that later failed with an opaque
        # AttributeError; re-raise so the caller reports the API failure.
        raise
    payload = validate_dictionary(module, loc_resp.json_data)
    final_resp = rest_obj.invoke_request("PUT", LOCATION_API.format(device_id), data=payload)
    return final_resp
+
+
def main():
    """Entry point: parse the arguments and apply the location settings."""
    string_options = ("device_service_tag", "data_center", "room", "aisle", "rack", "location")
    specs = {name: {"required": False, "type": "str"} for name in string_options}
    specs["device_id"] = {"required": False, "type": "int"}
    specs["rack_slot"] = {"required": False, "type": "int"}
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        mutually_exclusive=[('device_id', 'device_service_tag')],
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            # Abort on non-Modular appliances before touching device APIs.
            check_domain_service(module, rest_obj)
            resp = device_validation(module, rest_obj)
            module.exit_json(msg="Successfully updated the location settings.",
                             location_details=resp.json_data, changed=True)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # An unreachable appliance is reported as unreachable, not failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
        module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py
new file mode 100644
index 000000000..e895472ea
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py
@@ -0,0 +1,778 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_device_mgmt_network
+short_description: Configure network settings of devices on OpenManage Enterprise Modular
+description: This module allows to configure network settings on Chassis, Servers, and I/O Modules on OpenManage Enterprise Modular.
+version_added: 4.2.0
+author:
+ - Jagadeesh N V(@jagadeeshnv)
+extends_documentation_fragment:
+ - dellemc.openmanage.omem_auth_options
+options:
+ device_service_tag:
+ description:
+ - Service tag of the device.
+ - This option is mutually exclusive with I(device_id).
+ type: str
+ device_id:
+ description:
+ - ID of the device.
+ - This option is mutually exclusive with I(device_service_tag).
+ type: int
+ enable_nic:
+ description:
+ - Enable or disable Network Interface Card (NIC) configuration of the device.
+ - This option is not applicable to I/O Module.
+ type: bool
+ default: true
+ delay:
+ description:
+ - The time in seconds, after which settings are applied.
+ - This option is applicable only for Chassis.
+ type: int
+ default: 0
+ ipv4_configuration:
+ description:
+ - IPv4 network configuration.
+ - "C(WARNING) Ensure that you have an alternate interface to access OpenManage Enterprise Modular because these
+ options can change the current IPv4 address for I(hostname)."
+ type: dict
+ suboptions:
+ enable_ipv4:
+ description:
+ - Enable or disable access to the network using IPv4.
+ type: bool
+ required: true
+ enable_dhcp:
+ description:
+ - "Enable or disable the automatic request to obtain an IPv4 address from the IPv4 Dynamic Host Configuration
+ Protocol (DHCP) server."
+ - "C(NOTE) If this option is C(True), the values provided for I(static_ip_address), I(static_subnet_mask),
+ and I(static_gateway) are not applied for these fields. However, the module may report changes."
+ type: bool
+ static_ip_address:
+ description:
+ - Static IPv4 address
+ - This option is applicable when I(enable_dhcp) is false.
+ type: str
+ static_subnet_mask:
+ description:
+ - Static IPv4 subnet mask address
+ - This option is applicable when I(enable_dhcp) is false.
+ type: str
+ static_gateway:
+ description:
+ - Static IPv4 gateway address
+ - This option is applicable when I(enable_dhcp) is false.
+ type: str
+ use_dhcp_to_obtain_dns_server_address:
+ description:
+ - This option allows to automatically request and obtain IPv4 address for the DNS Server from the DHCP server.
+ - This option is applicable when I(enable_dhcp) is true.
+ - "C(NOTE) If this option is C(True), the values provided for I(static_preferred_dns_server) and
+ I(static_alternate_dns_server) are not applied for these fields. However, the module may report changes."
+ type: bool
+ static_preferred_dns_server:
+ description:
+ - Static IPv4 DNS preferred server
+ - This option is applicable when I(use_dhcp_for_dns_server_names) is false.
+ type: str
+ static_alternate_dns_server:
+ description:
+ - Static IPv4 DNS alternate server
+ - This option is applicable when I(use_dhcp_for_dns_server_names) is false.
+ type: str
+ ipv6_configuration:
+ description:
+ - IPv6 network configuration.
+ - "C(WARNING) Ensure that you have an alternate interface to access OpenManage Enterprise Modular because these options can
+ change the current IPv6 address for I(hostname)."
+ type: dict
+ suboptions:
+ enable_ipv6:
+ description: Enable or disable access to the network using the IPv6.
+ type: bool
+ required: true
+ enable_auto_configuration:
+ description:
+ - "Enable or disable the automatic request to obtain an IPv6 address from the IPv6 DHCP server or router
+ advertisements(RA)"
+ - "If I(enable_auto_configuration) is C(true), OpenManage Enterprise Modular retrieves IP configuration
+ (IPv6 address, prefix, and gateway address) from a DHCPv6 server on the existing network."
+ - "C(NOTE) If this option is C(True), the values provided for I(static_ip_address), I(static_prefix_length),
+ and I(static_gateway) are not applied for these fields. However, the module may report changes."
+ type: bool
+ static_ip_address:
+ description:
+ - Static IPv6 address
+ - This option is applicable when I(enable_auto_configuration) is false.
+ type: str
+ static_prefix_length:
+ description:
+ - Static IPv6 prefix length
+ - This option is applicable when I(enable_auto_configuration) is false.
+ type: int
+ static_gateway:
+ description:
+ - Static IPv6 gateway address
+ - This option is applicable when I(enable_auto_configuration) is false.
+ type: str
+ use_dhcpv6_to_obtain_dns_server_address:
+ description:
+ - This option allows to automatically request and obtain an IPv6 address for the DNS server from the DHCP server.
+ - This option is applicable when I(enable_auto_configuration) is true
+ - "C(NOTE) If this option is C(True), the values provided for I(static_preferred_dns_server) and I(static_alternate_dns_server)
+ are not applied for these fields. However, the module may report changes."
+ type: bool
+ static_preferred_dns_server:
+ description:
+ - Static IPv6 DNS preferred server
+ - This option is applicable when I(use_dhcp_for_dns_server_names) is false.
+ type: str
+ static_alternate_dns_server:
+ description:
+ - Static IPv6 DNS alternate server
+ - This option is applicable when I(use_dhcp_for_dns_server_names) is false.
+ type: str
+ management_vlan:
+ description:
+ - VLAN configuration.
+ type: dict
+ suboptions:
+ enable_vlan:
+ description:
+ - Enable or disable VLAN for management.
+ - The VLAN configuration cannot be updated if the I(register_with_dns) field under I(dns_configuration) is true.
+ - "C(WARNING) Ensure that the network cable is connected to the correct port after the VLAN configuration
+ is changed. If not, the VLAN configuration changes may not be applied."
+ required: true
+ type: bool
+ vlan_id:
+ description:
+ - VLAN ID.
+ - "The valid VLAN IDs are: 1 to 4000, and 4021 to 4094."
+ - This option is applicable when I(enable_vlan) is true.
+ type: int
+ dns_configuration:
+ description: Domain Name System (DNS) settings.
+ type: dict
+ suboptions:
+ register_with_dns:
+ description:
+ - Register/Unregister I(dns_name) on the DNS Server.
+ - C(WARNING) This option cannot be updated if VLAN configuration changes.
+ type: bool
+ use_dhcp_for_dns_domain_name:
+ description: Get the I(dns_domain_name) using a DHCP server.
+ type: bool
+ dns_name:
+ description:
+ - DNS name for I(hostname)
+ - This is applicable when I(register_with_dns) is true.
+ type: str
+ dns_domain_name:
+ description:
+ - Static DNS domain name
+ - This is applicable when I(use_dhcp_for_dns_domain_name) is false.
+ type: str
+ auto_negotiation:
+ description:
+ - Enables or disables the auto negotiation of the network speed.
+ - "C(NOTE): Setting I(auto_negotiation) to false and choosing a network port speed may result in the chassis
+ losing link to the top of rack network switch, or to the neighboring chassis in case of MCM mode. It is
+ recommended that the I(auto_negotiation) is set to C(true) for most use cases."
+ - This is applicable when I(use_dhcp_for_dns_domain_name) is false.
+ - This is applicable only for Chassis.
+ type: bool
+ network_speed:
+ description:
+ - The speed of the network port.
+ - This is applicable when I(auto_negotiation) is false.
+ - C(10_MB) to select network speed of 10 MB.
+ - C(100_MB) to select network speed of 100 MB.
+ - This is applicable only for Chassis.
+ choices:
+ - 10_MB
+ - 100_MB
+ type: str
+ dns_server_settings:
+ description:
+ - DNS server settings.
+ - This is applicable only for I/O Module.
+ type: dict
+ suboptions:
+ preferred_dns_server:
+ description:
+ - Enter the IP address of the preferred DNS server.
+ type: str
+ alternate_dns_server1:
+ description:
+ - Enter the IP address of the first alternate DNS server.
+ type: str
+ alternate_dns_server2:
+ description:
+ - Enter the IP address of the second alternate DNS server.
+ type: str
+requirements:
+ - "python >= 3.8.6"
+notes:
+ - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Network settings for chassis
+ dellemc.openmanage.ome_device_mgmt_network:
+ hostname: 192.168.0.1
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: CHAS123
+ ipv4_configuration:
+ enable_ipv4: true
+ enable_dhcp: false
+ static_ip_address: 192.168.0.2
+ static_subnet_mask: 255.255.254.0
+ static_gateway: 192.168.0.3
+ use_dhcp_to_obtain_dns_server_address: false
+ static_preferred_dns_server: 192.168.0.4
+ static_alternate_dns_server: 192.168.0.5
+ ipv6_configuration:
+ enable_ipv6: true
+ enable_auto_configuration: false
+ static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
+ static_prefix_length: 10
+ static_gateway: ffff::2607:f2b1:f081:9
+ use_dhcpv6_to_obtain_dns_server_address: false
+ static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3
+ static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4
+ dns_configuration:
+ register_with_dns: true
+ use_dhcp_for_dns_domain_name: false
+ dns_name: "MX-SVCTAG"
+ dns_domain_name: "dnslocaldomain"
+ auto_negotiation: no
+ network_speed: 100_MB
+
+- name: Network settings for server
+ dellemc.openmanage.ome_device_mgmt_network:
+ hostname: 192.168.0.1
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: SRVR123
+ ipv4_configuration:
+ enable_ipv4: true
+ enable_dhcp: false
+ static_ip_address: 192.168.0.2
+ static_subnet_mask: 255.255.254.0
+ static_gateway: 192.168.0.3
+ use_dhcp_to_obtain_dns_server_address: false
+ static_preferred_dns_server: 192.168.0.4
+ static_alternate_dns_server: 192.168.0.5
+ ipv6_configuration:
+ enable_ipv6: true
+ enable_auto_configuration: false
+ static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
+ static_prefix_length: 10
+ static_gateway: ffff::2607:f2b1:f081:9
+ use_dhcpv6_to_obtain_dns_server_address: false
+ static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3
+ static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4
+
+- name: Network settings for I/O module
+ dellemc.openmanage.ome_device_mgmt_network:
+ hostname: 192.168.0.1
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: IOM1234
+ ipv4_configuration:
+ enable_ipv4: true
+ enable_dhcp: false
+ static_ip_address: 192.168.0.2
+ static_subnet_mask: 255.255.254.0
+ static_gateway: 192.168.0.3
+ ipv6_configuration:
+ enable_ipv6: true
+ enable_auto_configuration: false
+ static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
+ static_prefix_length: 10
+ static_gateway: ffff::2607:f2b1:f081:9
+ dns_server_settings:
+ preferred_dns_server: 192.168.0.4
+ alternate_dns_server1: 192.168.0.5
+
+- name: Management VLAN configuration of chassis using device id
+ dellemc.openmanage.ome_device_mgmt_network:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id : 12345
+ management_vlan:
+ enable_vlan: true
+ vlan_id: 2345
+ dns_configuration:
+ register_with_dns: false
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall status of the network config operation.
+ returned: always
+ sample: Successfully applied the network settings.
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1004",
+ "RelatedProperties": [],
+ "Message": "Unable to complete the request because IPV4 Settings Capability is not Supported does not
+ exist or is not applicable for the resource URI.",
+ "MessageArgs": [
+ "IPV4 Settings Capability is not Supported"
+ ],
+ "Severity": "Critical",
+ "Resolution": "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide
+ for more information about resource URI and its properties."
+ }
+ ]
+ }
+}
+"""
+
+import json
+import socket
+import copy
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.common.dict_transformations import recursive_diff
+
# OME REST endpoints (relative paths under /api/).
DEVICE_URI = "DeviceService/Devices"
MGMT_DOMAIN = "ManagementDomainService/Domains"
LEAD_CONFIG = "ApplicationService/Network/AddressConfiguration"
NETWORK_SETTINGS = "DeviceService/Devices({0})/Settings('Network')"
# User-facing message templates.
DEVICE_NOT_FOUND = "Device with {0} '{1}' not found."
NON_CONFIG_NETWORK = "Network settings for {0} is not configurable."
SUCCESS_MSG = "Successfully applied the network settings."
INVALID_IP = "Invalid {0} address provided for the {1}"
DNS_SETT_ERR1 = "'SecondaryDNS' requires 'PrimaryDNS' to be provided."
DNS_SETT_ERR2 = "'TertiaryDNS' requires both 'PrimaryDNS' and 'SecondaryDNS' to be provided."
NO_CHANGES_MSG = "No changes found to be applied."
CHANGES_FOUND = "Changes found to be applied."
# Device 'Type' values as reported by DeviceService/Devices.
SERVER = 1000
CHASSIS = 2000
IO_MODULE = 4000
API_TIMEOUT = 120  # seconds; the settings PUT can be slow while changes apply
+
+
def validate_ip_address(address):
    """Return True if *address* is a well-formed dotted-quad IPv4 string."""
    # inet_aton tolerates short forms such as '127.1'; insist on four octets.
    if address.count('.') != 3:
        return False
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
+
+
def validate_ip_v6_address(address):
    """Return True if *address* parses as a valid IPv6 address."""
    try:
        socket.inet_pton(socket.AF_INET6, address)
        return True
    except socket.error:
        return False
+
+
def validate_ipaddress(module, ip_type, config, var_list, ip_func):
    """Fail the module if any address option listed in *var_list* under the
    *config* section of module.params does not satisfy *ip_func*.

    Empty/absent sections and empty values are silently accepted.
    """
    section = module.params.get(config)
    if not section:
        return
    for name in var_list:
        value = section.get(name)
        if value and not ip_func(value):
            module.fail_json(msg=INVALID_IP.format(ip_type, name))
+
+
def validate_input(module):
    """Validate address-format options and normalise numeric suboptions.

    Fails the module on any malformed IPv4/IPv6 address; converts the IPv6
    prefix length and the VLAN id to strings, as the REST payloads expect.
    """
    addr_keys = ["static_ip_address", "static_gateway",
                 "static_preferred_dns_server", "static_alternate_dns_server"]
    validate_ipaddress(module, "IPv6", "ipv6_configuration", addr_keys, validate_ip_v6_address)
    # IPv4 additionally carries a subnet mask.
    validate_ipaddress(module, "IPv4", "ipv4_configuration",
                       addr_keys + ["static_subnet_mask"], validate_ip_address)
    dns_settings = module.params.get("dns_server_settings")
    if dns_settings:
        # IOM DNS servers may be either IPv4 or IPv6.
        for name, addr in dns_settings.items():
            if addr is not None and not (validate_ip_address(addr) or validate_ip_v6_address(addr)):
                module.fail_json(msg=INVALID_IP.format("IP", name))
    # The REST payloads expect these numeric options as strings.
    ipv6 = module.params.get("ipv6_configuration")
    if ipv6 and ipv6.get("static_prefix_length"):
        ipv6["static_prefix_length"] = str(ipv6["static_prefix_length"])
    vlan = module.params.get("management_vlan")
    if vlan and vlan.get("vlan_id"):
        vlan["vlan_id"] = str(vlan["vlan_id"])
+
+
def get_device_details(module, rest_obj):
    """Look up the target device by id or service tag; fail when not found.

    Returns the matching device record from DeviceService/Devices.
    """
    ident = module.params.get('device_id')
    if ident:
        key = 'Id'
        query = {"$filter": "{0} eq {1}".format(key, ident)}
    else:
        ident = module.params.get('device_service_tag')
        key = 'Identifier'
        query = {"$filter": "{0} eq '{1}'".format(key, ident)}
    resp = rest_obj.invoke_request('GET', DEVICE_URI, query_param=query)
    if resp.success and resp.json_data.get('value'):
        # $filter may return broader matches; insist on exact equality.
        for device in resp.json_data.get('value', []):
            if device.get(key) == ident:
                return device
    module.fail_json(msg=DEVICE_NOT_FOUND.format(key, ident))
+
+
def transform_diff(params, translator, sub_payload, bool_trans=None):
    """Map module options onto payload attributes and merge them in place.

    *translator* maps option names to payload keys; booleans are converted
    through *bool_trans* when given (e.g. True -> 'Enabled'). Returns a dict
    of the attributes whose values differ from what *sub_payload* held.
    """
    mapped = {}
    for opt_name, payload_key in translator.items():
        value = params.get(opt_name)
        if value is None:
            continue
        if bool_trans and isinstance(value, bool):
            value = bool_trans.get(value)
        mapped[payload_key] = value
    delta = recursive_diff(mapped, sub_payload)
    diff = delta[0] if delta and delta[0] else {}
    sub_payload.update(mapped)
    return diff
+
+
def validate_dependency(mparams):
    """Return a deep copy of *mparams* with irrelevant suboptions removed.

    Options that are overridden by DHCP/auto-configuration, or made moot by
    a disabled feature (IPv4/IPv6/VLAN/auto-negotiation), are dropped so they
    never reach the payload diff. The input mapping is not modified.
    """
    params = copy.deepcopy(mparams)
    dns_keys = ("static_preferred_dns_server", "static_alternate_dns_server")
    ipv4 = params.get('ipv4_configuration')
    if ipv4:
        drop = set()
        static_keys = ("static_ip_address", "static_gateway", "static_subnet_mask")
        if ipv4.get("use_dhcp_to_obtain_dns_server_address") is True:
            drop.update(dns_keys)
        if ipv4.get("enable_dhcp") is True:
            drop.update(static_keys)
        if ipv4.get("enable_ipv4") is False:
            # With IPv4 disabled, every other IPv4 suboption is irrelevant.
            drop.update(dns_keys)
            drop.update(static_keys)
            drop.update(("enable_dhcp", "use_dhcp_to_obtain_dns_server_address"))
        for key in drop:
            ipv4.pop(key, None)
    ipv6 = params.get('ipv6_configuration')
    if ipv6:
        drop = set()
        static_keys = ("static_ip_address", "static_gateway", "static_prefix_length")
        if ipv6.get("use_dhcpv6_to_obtain_dns_server_address") is True:
            drop.update(dns_keys)
        if ipv6.get("enable_auto_configuration") is True:
            drop.update(static_keys)
        if ipv6.get("enable_ipv6") is False:
            # With IPv6 disabled, every other IPv6 suboption is irrelevant.
            drop.update(dns_keys)
            drop.update(static_keys)
            drop.update(("enable_auto_configuration", "use_dhcpv6_to_obtain_dns_server_address"))
        for key in drop:
            ipv6.pop(key, None)
    vlan = params.get('management_vlan')
    if vlan and vlan.get('enable_vlan') is False:
        vlan.pop('vlan_id', None)
    dns = params.get('dns_configuration')
    if dns:
        if dns.get('auto_negotiation') is True:
            dns.pop('network_speed', None)
        if dns.get('use_dhcp_for_dns_domain_name') is True:
            dns.pop('dns_domain_name', None)
    return params
+
+
def update_chassis_payload(module, payload):
    """Merge the requested chassis network options into *payload* in place.

    Maps module option names onto the chassis REST attribute names, updates
    the nested payload sections, and returns a dict of only the attributes
    that actually changed (empty dict means nothing to apply).
    """
    # Option-name -> chassis payload attribute translators.
    ipv4 = {
        "enable_dhcp": "EnableDHCP",
        "enable_ipv4": "EnableIPv4",
        "static_alternate_dns_server": "StaticAlternateDNSServer",
        "static_gateway": "StaticGateway",
        "static_ip_address": "StaticIPAddress",
        "static_preferred_dns_server": "StaticPreferredDNSServer",
        "static_subnet_mask": "StaticSubnetMask",
        "use_dhcp_to_obtain_dns_server_address": "UseDHCPObtainDNSServerAddresses"
    }
    ipv6 = {
        "enable_auto_configuration": "EnableAutoconfiguration",
        "enable_ipv6": "EnableIPv6",
        "static_alternate_dns_server": "StaticAlternateDNSServer",
        "static_gateway": "StaticGateway",
        "static_ip_address": "StaticIPv6Address",
        "static_preferred_dns_server": "StaticPreferredDNSServer",
        "static_prefix_length": "StaticPrefixLength",
        "use_dhcpv6_to_obtain_dns_server_address": "UseDHCPv6ObtainDNSServerAddresses"
    }
    dns = {
        "auto_negotiation": "AutoNegotiation",
        "dns_domain_name": "DnsDomainName",
        "dns_name": "DnsName",
        "network_speed": "NetworkSpeed",
        "register_with_dns": "RegisterDNS",
        "use_dhcp_for_dns_domain_name": "UseDHCPForDomainName"
    }
    vlan = {"enable_vlan": "EnableVLAN", "vlan_id": "MgmtVLANId"}
    gnrl = payload.get('GeneralSettings')  # where enable NIC is present
    diff = {}
    mparams = validate_dependency(module.params)
    enable_nic = mparams.get('enable_nic')
    delay = mparams.get('delay')
    if enable_nic:
        # Sub-configurations are applied only while the NIC is enabled.
        if mparams.get('ipv4_configuration'):
            df = transform_diff(mparams.get('ipv4_configuration'), ipv4, payload.get('Ipv4Settings'))
            diff.update(df)
        if mparams.get('ipv6_configuration'):
            df = transform_diff(mparams.get('ipv6_configuration'), ipv6, payload.get('Ipv6Settings'))
            diff.update(df)
        if mparams.get('dns_configuration'):
            # DNS options live in the chassis 'GeneralSettings' section.
            df = transform_diff(mparams.get('dns_configuration'), dns, payload.get('GeneralSettings'))
            diff.update(df)
        if mparams.get('management_vlan'):
            # VLAN attributes sit at the top level of the chassis payload.
            df = transform_diff(mparams.get('management_vlan'), vlan, payload)
            diff.update(df)
    # EnableNIC and Delay are applied regardless of the other sections.
    if gnrl.get('EnableNIC') != enable_nic:
        gnrl['EnableNIC'] = enable_nic
        diff.update({'EnableNIC': enable_nic})
    if delay != gnrl.get('Delay'):
        gnrl['Delay'] = delay
        diff.update({'Delay': delay})
    return diff
+
+
def update_server_payload(module, payload):
    """Merge the requested server (blade) network options into *payload* in place.

    The server payload is a flat dict whose boolean attributes are carried as
    the strings 'Enabled'/'Disabled'. Returns a dict of only the attributes
    that changed; an empty dict means there is nothing to apply.
    """
    # Option-name -> server payload attribute translators.
    ipv4_map = {
        "enable_dhcp": "enableDHCPIPv4",
        "enable_ipv4": "enableIPv4",
        "static_alternate_dns_server": "staticAlternateDNSIPv4",
        "static_gateway": "staticGatewayIPv4",
        "static_ip_address": "staticIPAddressIPv4",
        "static_preferred_dns_server": "staticPreferredDNSIPv4",
        "static_subnet_mask": "staticSubnetMaskIPv4",
        "use_dhcp_to_obtain_dns_server_address": "useDHCPToObtainDNSIPv4"
    }
    ipv6_map = {
        "enable_auto_configuration": "enableAutoConfigurationIPv6",
        "enable_ipv6": "enableIPv6",
        "static_alternate_dns_server": "staticAlternateDNSIPv6",
        "static_gateway": "staticGatewayIPv6",
        "static_ip_address": "staticIPAddressIPv6",
        "static_preferred_dns_server": "staticPreferredDNSIPv6",
        "static_prefix_length": "staticPrefixLengthIPv6",
        "use_dhcpv6_to_obtain_dns_server_address": "useDHCPToObtainDNSIPv6"
    }
    vlan_map = {"enable_vlan": "vlanEnable", "vlan_id": "vlanId"}
    on_off = {True: 'Enabled', False: 'Disabled'}
    diff = {}
    params = validate_dependency(module.params)
    nic_enabled = params.get('enable_nic')
    if nic_enabled:
        # Sub-configurations apply only while the NIC itself is enabled.
        for option, translator in (('ipv4_configuration', ipv4_map),
                                   ('ipv6_configuration', ipv6_map),
                                   ('management_vlan', vlan_map)):
            section = params.get(option)
            if section:
                diff.update(transform_diff(section, translator, payload, on_off))
    # The NIC state itself is applied unconditionally.
    nic_state = on_off.get(nic_enabled)
    if payload.get('enableNIC') != nic_state:
        payload['enableNIC'] = nic_state
        diff.update({'enableNIC': nic_state})
    return diff
+
+
def update_iom_payload(module, payload):
    """Merge the requested I/O module network options into *payload* in place.

    Returns a dict of only the attributes that changed; an empty dict means
    there is nothing to apply. Fails the module when the merged DNS server
    settings violate the required ordering (primary before secondary before
    tertiary).
    """
    # Option-name -> IOM payload attribute translators.
    ipv4 = {
        "enable_dhcp": "EnableDHCP",
        "enable_ipv4": "EnableIPv4",
        "static_gateway": "StaticGateway",
        "static_ip_address": "StaticIPAddress",
        "static_subnet_mask": "StaticSubnetMask",
    }
    ipv6 = {
        "enable_ipv6": "EnableIPv6",
        "static_gateway": "StaticGateway",
        "static_ip_address": "StaticIPv6Address",
        "static_prefix_length": "StaticPrefixLength",
        "enable_auto_configuration": "UseDHCPv6"
    }
    dns = {"preferred_dns_server": "PrimaryDNS",
           "alternate_dns_server1": "SecondaryDNS",
           "alternate_dns_server2": "TertiaryDNS"}
    vlan = {"enable_vlan": "EnableMgmtVLANId", "vlan_id": "MgmtVLANId"}
    diff = {}
    mparams = validate_dependency(module.params)
    if mparams.get('ipv4_configuration'):
        df = transform_diff(mparams.get('ipv4_configuration'), ipv4, payload.get('IomIPv4Settings'))
        diff.update(df)
    if mparams.get('ipv6_configuration'):
        df = transform_diff(mparams.get('ipv6_configuration'), ipv6, payload.get('IomIPv6Settings'))
        diff.update(df)
    if mparams.get('management_vlan'):
        # VLAN attributes sit at the top level of the IOM payload.
        df = transform_diff(mparams.get('management_vlan'), vlan, payload)
        diff.update(df)
    if mparams.get('dns_server_settings'):
        df = transform_diff(mparams.get('dns_server_settings'), dns, payload.get('IomDNSSettings'))
        # Validate the DNS hierarchy on the merged result, so existing
        # payload values combined with the new ones are what is checked.
        dns_iom = payload.get('IomDNSSettings')
        if dns_iom.get("SecondaryDNS") and not dns_iom.get("PrimaryDNS"):
            module.fail_json(msg=DNS_SETT_ERR1)
        if dns_iom.get("TertiaryDNS") and (not dns_iom.get("PrimaryDNS") or not dns_iom.get("SecondaryDNS")):
            module.fail_json(msg=DNS_SETT_ERR2)
        diff.update(df)
    return diff
+
+
def get_network_payload(module, rest_obj, dvc):
    """Build the PUT payload for the device's 'Network' setting.

    Fetches the current settings, merges in the requested changes via the
    device-type-specific updater, and exits early when there is nothing to
    change or when running in check mode.
    """
    resp = rest_obj.invoke_request('GET', NETWORK_SETTINGS.format(dvc.get('Id')))
    payload = rest_obj.strip_substr_dict(resp.json_data)
    updaters = {
        CHASSIS: update_chassis_payload,
        SERVER: update_server_payload,
        IO_MODULE: update_iom_payload,
    }
    diff = updaters[dvc.get('Type')](module, payload)
    if not diff:
        module.exit_json(msg=NO_CHANGES_MSG)
    if module.check_mode:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    return payload
+
+
def main():
    """Entry point for ome_device_mgmt_network: apply device network settings.

    Builds the nested argument spec, validates the inputs, resolves the
    target device, and PUTs the merged network settings, exiting with the
    REST response (or a structured error).
    """
    # --- Per-section sub-option specs ---------------------------------
    ipv4_options = {"enable_ipv4": {"required": True, "type": 'bool'},
                    "enable_dhcp": {"type": 'bool'},
                    "static_ip_address": {"type": 'str'},
                    "static_subnet_mask": {"type": 'str'},
                    "static_gateway": {"type": 'str'},
                    "use_dhcp_to_obtain_dns_server_address": {"type": 'bool'},
                    "static_preferred_dns_server": {"type": 'str'},
                    "static_alternate_dns_server": {"type": 'str'}}
    ipv6_options = {"enable_ipv6": {"required": True, "type": 'bool'},
                    "enable_auto_configuration": {"type": 'bool'},
                    "static_ip_address": {"type": 'str'},
                    "static_prefix_length": {"type": 'int'},
                    "static_gateway": {"type": 'str'},
                    "use_dhcpv6_to_obtain_dns_server_address": {"type": 'bool'},
                    "static_preferred_dns_server": {"type": 'str'},
                    "static_alternate_dns_server": {"type": 'str'}}
    dns_options = {"register_with_dns": {"type": 'bool'},
                   "use_dhcp_for_dns_domain_name": {"type": 'bool'},
                   "dns_name": {"type": 'str'},
                   "dns_domain_name": {"type": 'str'},
                   "auto_negotiation": {"type": 'bool'},
                   "network_speed": {"type": 'str', "choices": ['10_MB', '100_MB']}}
    management_vlan = {"enable_vlan": {"required": True, "type": 'bool'},
                       "vlan_id": {"type": 'int'}}
    dns_server_settings = {"preferred_dns_server": {"type": 'str'},
                           "alternate_dns_server1": {"type": 'str'},
                           "alternate_dns_server2": {"type": 'str'}}
    # --- Top-level spec; required_if entries express the suboption
    # dependencies documented in DOCUMENTATION -------------------------
    specs = {
        "enable_nic": {"type": 'bool', "default": True},
        "device_id": {"type": 'int'},
        "device_service_tag": {"type": 'str'},
        "delay": {"type": 'int', "default": 0},
        "ipv4_configuration":
            {"type": "dict", "options": ipv4_options,
             "required_if": [
                 ['enable_ipv4', True, ('enable_dhcp',), True],
                 ['enable_dhcp', False, ('static_ip_address', 'static_subnet_mask', "static_gateway"), False],
                 ['use_dhcp_to_obtain_dns_server_address', False,
                  ('static_preferred_dns_server', 'static_alternate_dns_server'), True]]
             },
        "ipv6_configuration":
            {"type": "dict", "options": ipv6_options,
             "required_if": [
                 ['enable_ipv6', True, ('enable_auto_configuration',), True],
                 ['enable_auto_configuration', False,
                  ('static_ip_address', 'static_prefix_length', "static_gateway"), False],
                 ['use_dhcpv6_to_obtain_dns_server_address', False,
                  ('static_preferred_dns_server', 'static_alternate_dns_server'), True]]
             },
        "dns_configuration":
            {"type": "dict", "options": dns_options,
             "required_if": [
                 ['register_with_dns', True, ('dns_name',), False],
                 ['use_dhcp_for_dns_domain_name', False, ('dns_domain_name',)],
                 ['auto_negotiation', False, ('network_speed',)]]
             },
        "management_vlan":
            {"type": "dict", "options": management_vlan,
             "required_if": [
                 ['enable_vlan', True, ('vlan_id',), True]]
             },
        "dns_server_settings":
            {"type": "dict", "options": dns_server_settings,
             "required_one_of": [("preferred_dns_server", "alternate_dns_server1", "alternate_dns_server2")]
             }
    }
    specs.update(ome_auth_params)  # hostname / credentials / port / ca_path options
    module = AnsibleModule(
        argument_spec=specs,
        required_one_of=[('device_id', 'device_service_tag')],
        mutually_exclusive=[('device_id', 'device_service_tag')],
        supports_check_mode=True
    )
    try:
        # req_session=True reuses one authenticated session for all requests.
        with RestOME(module.params, req_session=True) as rest_obj:
            validate_input(module)
            dvc = get_device_details(module, rest_obj)
            # Only chassis, servers, and I/O modules are configurable.
            if dvc.get('Type') in [SERVER, CHASSIS, IO_MODULE]:
                nw_setting = get_network_payload(module, rest_obj, dvc)
                resp = rest_obj.invoke_request('PUT', NETWORK_SETTINGS.format(dvc.get('Id')),
                                               data=nw_setting, api_timeout=API_TIMEOUT)
                module.exit_json(msg=SUCCESS_MSG, network_details=resp.json_data, changed=True)
            else:
                module.fail_json(msg=NON_CONFIG_NETWORK.format(dvc.get('Model')))
    except HTTPError as err:
        # The HTTPError body is a JSON document describing the failure.
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # An unreachable host is reported as 'unreachable', not 'failed'.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py
new file mode 100644
index 000000000..81475d48b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py
@@ -0,0 +1,398 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_device_network_services
+short_description: Configure chassis network services settings on OpenManage Enterprise Modular
+description: This module allows to configure the network services on OpenManage Enterprise Modular.
+version_added: "4.3.0"
+extends_documentation_fragment:
+ - dellemc.openmanage.omem_auth_options
+options:
+ device_id:
+ type: int
+ description:
+ - The ID of the chassis for which the settings need to be updated.
+ - If the device ID is not specified, this module updates the network services settings for the I(hostname).
+ - I(device_id) is mutually exclusive with I(device_service_tag).
+ device_service_tag:
+ type: str
+ description:
+ - The service tag of the chassis for which the setting needs to be updated.
+ - If the device service tag is not specified, this module updates the network
+ services settings for the I(hostname).
+ - I(device_service_tag) is mutually exclusive with I(device_id).
+ snmp_settings:
+ type: dict
+ description: The settings for SNMP configuration.
+ suboptions:
+ enabled:
+ type: bool
+ required: true
+ description: Enables or disables the SNMP settings.
+ port_number:
+ type: int
+ description: The SNMP port number.
+ community_name:
+ type: str
+ description:
+ - The SNMP community string.
+ - Required when I(enabled) is C(true).
+ ssh_settings:
+ type: dict
+ description: The settings for SSH configuration.
+ suboptions:
+ enabled:
+ required: true
+ type: bool
+ description: Enables or disables the SSH settings.
+ port_number:
+ type: int
+ description: The port number for SSH service.
+ max_sessions:
+ type: int
+ description: Number of SSH sessions.
+ max_auth_retries:
+ type: int
+ description: The number of retries when the SSH session fails.
+ idle_timeout:
+ type: float
+ description: SSH idle timeout in minutes.
+ remote_racadm_settings:
+ type: dict
+ description: The settings for remote RACADM configuration.
+ suboptions:
+ enabled:
+ type: bool
+ required: true
+ description: Enables or disables the remote RACADM settings.
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Update network services settings of a chassis using the device ID
+ dellemc.openmanage.ome_device_network_services:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 25011
+ snmp_settings:
+ enabled: true
+ port_number: 161
+ community_name: public
+ ssh_settings:
+ enabled: false
+ remote_racadm_settings:
+ enabled: false
+
+- name: Update network services settings of a chassis using the device service tag.
+ dellemc.openmanage.ome_device_network_services:
+ hostname: "192.168.0.2"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: GHRT2RL
+ snmp_settings:
+ enabled: false
+ ssh_settings:
+ enabled: true
+ port_number: 22
+ max_sessions: 1
+ max_auth_retries: 3
+ idle_timeout: 1
+ remote_racadm_settings:
+ enabled: false
+
+- name: Update network services settings of the host chassis.
+ dellemc.openmanage.ome_device_network_services:
+ hostname: "192.168.0.3"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ snmp_settings:
+ enabled: false
+ ssh_settings:
+ enabled: false
+ remote_racadm_settings:
+ enabled: true
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall status of the network services settings.
+ returned: always
+ sample: "Successfully updated the network services settings."
+network_services_details:
+ type: dict
+ description: returned when network services settings are updated successfully.
+ returned: success
+ sample: {
+ "EnableRemoteRacadm": true,
+ "SettingType": "NetworkServices",
+ "SnmpConfiguration": {
+ "PortNumber": 161,
+ "SnmpEnabled": true,
+ "SnmpV1V2Credential": {
+ "CommunityName": "public"
+ }
+ },
+ "SshConfiguration": {
+ "IdleTimeout": 60,
+ "MaxAuthRetries": 3,
+ "MaxSessions": 1,
+ "PortNumber": 22,
+ "SshEnabled": false
+ }
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CAPP1042",
+ "RelatedProperties": [],
+ "Message": "Unable to update the network configuration because the SNMP PortNumber is already in use.",
+ "MessageArgs": ["SNMP PortNumber"],
+ "Severity": "Informational",
+ "Resolution": "Enter a different port number and retry the operation."
+ }
+ ]
+ }
+ }
+"""
+
+
+import json
+import socket
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+
# API routes and user-facing message constants for this module.
DOMAIN_URI = "ManagementDomainService/Domains"
DEVICE_URI = "DeviceService/Devices"
NETWORK_SERVICE_API = "DeviceService/Devices({0})/Settings('NetworkServices')"
CONFIG_FAIL_MSG = "one of the following is required: snmp_settings, ssh_settings, remote_racadm_settings"
# Fixed copy/paste from the device-location module: this module configures
# network services, so the unsupported-platform message must say so.
DOMAIN_FAIL_MSG = "The device network services settings operation is supported only on " \
                  "OpenManage Enterprise Modular."
FETCH_FAIL_MSG = "Failed to retrieve the device information."
DEVICE_FAIL_MSG = "Unable to complete the operation because the entered target device {0} '{1}' is invalid."
NETWORK_SERVICE_FAIL_MSG = "Unable to complete the operation because the network services settings " \
                           "are not supported on the specified device."
CHANGES_FOUND = "Changes found to be applied."
NO_CHANGES_FOUND = "No changes found to be applied."
SUCCESS_MSG = "Successfully updated the network services settings."
+
+
def check_domain_service(module, rest_obj):
    """Fail early unless the target appliance is OpenManage Enterprise Modular.

    The ManagementDomainService API exists only on OME-Modular; a standalone
    OpenManage Enterprise appliance rejects the probe with MessageId CGEN1006.

    :param module: AnsibleModule instance, used to fail the task.
    :param rest_obj: RestOME session used to issue the probe request.
    :return: None.
    """
    try:
        rest_obj.invoke_request("GET", DOMAIN_URI, api_timeout=5)
    except HTTPError as err:
        # Bug fix: HTTPError is a file-like object, so it must be parsed with
        # json.load; json.loads expects str/bytes and raised TypeError here.
        # (Matches the json.load(err) usage in this module's main handler.)
        err_message = json.load(err)
        if err_message["error"]["@Message.ExtendedInfo"][0]["MessageId"] == "CGEN1006":
            module.fail_json(msg=DOMAIN_FAIL_MSG)
    return
+
+
def get_ip_from_host(hostname):
    """Best-effort resolution of *hostname* to an IP address.

    Returns the hostname unchanged when resolution fails or yields nothing,
    so the caller can still attempt a substring match against it.

    :param hostname: host name or address string from module parameters.
    :return: resolved IP address string, or the original hostname.
    """
    try:
        addr_info = socket.getaddrinfo(hostname, None)
        resolved = addr_info[-1][-1][0]
    except Exception:
        # Resolution failure (socket.gaierror or anything else) is not
        # fatal here; fall back to the raw hostname.
        return hostname
    return resolved or hostname
+
+
def get_chassis_device(module, rest_obj):
    """Find the chassis device managed at the module's target hostname.

    Resolves the hostname to an IP address and matches it against the
    PublicAddress list of each management domain; fails the module when no
    domain advertises that address.

    :param module: AnsibleModule instance.
    :param rest_obj: RestOME session object.
    :return: tuple ("Id", device_id) identifying the chassis.
    """
    key, value = None, None
    ipaddress = get_ip_from_host(module.params["hostname"])
    resp = rest_obj.invoke_request("GET", DOMAIN_URI)
    for data in resp.json_data["value"]:
        if ipaddress in data["PublicAddress"]:
            key, value = ("Id", data["DeviceId"])
            break
    else:
        # for/else: runs only when the loop completed without "break",
        # i.e. no domain matched the resolved address.
        module.fail_json(msg=FETCH_FAIL_MSG)
    return key, value
+
+
def check_mode_validation(module, loc_data, rest_obj):
    """Diff requested network-service settings against the current settings.

    Builds "requested" vs "existing" key sets for SNMP, SSH, the SNMP
    community string and remote RACADM, exits early for the check-mode and
    no-change paths, and otherwise returns the PUT payload built by merging
    the requested values onto the current settings.

    :param module: AnsibleModule instance.
    :param loc_data: current NetworkServices settings from the appliance.
    :param rest_obj: RestOME session object (unused here).
    :return: dict payload for the NetworkServices PUT request.
    """
    req_snmp, req_ssh, req_comm_str, req_racadm = {}, {}, {}, {}
    exist_snmp, exist_ssh, exist_comm_str, exist_racadm = {}, {}, {}, {}
    payload = {"SettingType": "NetworkServices"}
    snmp_enabled = module.params.get("snmp_settings")
    if snmp_enabled is not None and snmp_enabled["enabled"] is True:
        # Community string is only compared when SNMP is being enabled.
        req_snmp.update({"SnmpEnabled": snmp_enabled["enabled"]})
        req_comm_str.update({"CommunityName": module.params["snmp_settings"]["community_name"]})
        exist_snmp.update({"SnmpEnabled": loc_data["SnmpConfiguration"]["SnmpEnabled"]})
        exist_comm_str.update({"CommunityName": loc_data["SnmpConfiguration"]["SnmpV1V2Credential"]["CommunityName"]})
    elif snmp_enabled is not None and snmp_enabled["enabled"] is False:
        req_snmp.update({"SnmpEnabled": snmp_enabled["enabled"]})
        exist_snmp.update({"SnmpEnabled": loc_data["SnmpConfiguration"]["SnmpEnabled"]})

    # Optional SNMP/SSH sub-settings are diffed only when the service is
    # being enabled and the user actually supplied the value.
    if snmp_enabled is not None and snmp_enabled["enabled"] is True and snmp_enabled.get("port_number") is not None:
        req_snmp.update({"PortNumber": snmp_enabled.get("port_number")})
        exist_snmp.update({"PortNumber": loc_data["SnmpConfiguration"]["PortNumber"]})
    ssh_enabled = module.params.get("ssh_settings")
    if ssh_enabled is not None and ssh_enabled["enabled"] is True:
        req_ssh.update({"SshEnabled": ssh_enabled["enabled"]})
        exist_ssh.update({"SshEnabled": loc_data["SshConfiguration"]["SshEnabled"]})
    elif ssh_enabled is not None and ssh_enabled["enabled"] is False:
        req_ssh.update({"SshEnabled": ssh_enabled["enabled"]})
        exist_ssh.update({"SshEnabled": loc_data["SshConfiguration"]["SshEnabled"]})

    if ssh_enabled is not None and ssh_enabled["enabled"] is True and ssh_enabled.get("port_number") is not None:
        req_ssh.update({"PortNumber": module.params["ssh_settings"]["port_number"]})
        exist_ssh.update({"PortNumber": loc_data["SshConfiguration"]["PortNumber"]})
    if ssh_enabled is not None and ssh_enabled["enabled"] is True and ssh_enabled.get("max_sessions") is not None:
        req_ssh.update({"MaxSessions": module.params["ssh_settings"]["max_sessions"]})
        exist_ssh.update({"MaxSessions": loc_data["SshConfiguration"]["MaxSessions"]})
    if ssh_enabled is not None and ssh_enabled["enabled"] is True and ssh_enabled.get("max_auth_retries") is not None:
        req_ssh.update({"MaxAuthRetries": module.params["ssh_settings"]["max_auth_retries"]})
        exist_ssh.update({"MaxAuthRetries": loc_data["SshConfiguration"]["MaxAuthRetries"]})
    if ssh_enabled is not None and ssh_enabled["enabled"] is True and ssh_enabled.get("idle_timeout") is not None:
        # idle_timeout is taken in minutes; the API stores seconds.
        req_ssh.update({"IdleTimeout": int(module.params["ssh_settings"]["idle_timeout"] * 60)})
        exist_ssh.update({"IdleTimeout": int(loc_data["SshConfiguration"]["IdleTimeout"])})
    recadm_enabled = module.params.get("remote_racadm_settings")
    if recadm_enabled is not None and recadm_enabled["enabled"] is True:
        req_racadm = {"EnableRemoteRacadm": recadm_enabled["enabled"]}
        exist_racadm = {"EnableRemoteRacadm": loc_data["EnableRemoteRacadm"]}
    elif recadm_enabled is not None and recadm_enabled["enabled"] is False:
        req_racadm = {"EnableRemoteRacadm": recadm_enabled["enabled"]}
        exist_racadm = {"EnableRemoteRacadm": loc_data["EnableRemoteRacadm"]}
    # Symmetric difference of each requested/existing item set: truthy when
    # any requested value differs from the device's current value.
    # NOTE(review): "changes" is a single-element list, so the any()/all()
    # calls below both reduce to that one boolean.
    changes = [bool(set(req_snmp.items()) ^ set(exist_snmp.items())) or
               bool(set(req_ssh.items()) ^ set(exist_ssh.items())) or
               bool(set(req_comm_str.items()) ^ set(exist_comm_str.items())) or
               bool(set(req_racadm.items()) ^ set(exist_racadm.items()))]
    if module.check_mode and any(changes) is True:
        # Convert IdleTimeout back to minutes for user-facing output.
        loc_data["SshConfiguration"]["IdleTimeout"] = loc_data["SshConfiguration"]["IdleTimeout"] / 60
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    elif module.check_mode and all(changes) is False:
        loc_data["SshConfiguration"]["IdleTimeout"] = loc_data["SshConfiguration"]["IdleTimeout"] / 60
        module.exit_json(msg=NO_CHANGES_FOUND)
    elif not module.check_mode and all(changes) is False:
        loc_data["SshConfiguration"]["IdleTimeout"] = loc_data["SshConfiguration"]["IdleTimeout"] / 60
        module.exit_json(msg=NO_CHANGES_FOUND)
    else:
        payload.update(loc_data)
        # Conditional-expression statements: merge each requested section
        # into the payload only when it is non-empty.
        payload["SnmpConfiguration"].update(req_snmp) if req_snmp else None
        payload["SnmpConfiguration"]["SnmpV1V2Credential"].update(req_comm_str) if req_comm_str else None
        payload["SshConfiguration"].update(req_ssh) if req_ssh else None
        payload.update(req_racadm) if req_racadm else None
    return payload
+
+
def fetch_device_details(module, rest_obj):
    """Resolve the target device and apply the network services settings.

    Resolves the device from device_id / device_service_tag (or falls back
    to the chassis owning the module's hostname), validates it, then GETs
    the current NetworkServices settings and PUTs the merged payload.

    :param module: AnsibleModule instance.
    :param rest_obj: RestOME session object.
    :return: response object of the PUT request.
    """
    device_id, tag, final_resp = module.params.get("device_id"), module.params.get("device_service_tag"), {}
    if device_id is None and tag is None:
        # Neither identifier supplied: target the chassis that owns the
        # hostname the module is talking to.
        key, value = get_chassis_device(module, rest_obj)
        device_id = value
    else:
        key, value = ("Id", device_id) if device_id is not None else ("DeviceServiceTag", tag)
        # Numeric Id is unquoted in the OData filter; service tags are quoted.
        param_value = "{0} eq {1}".format(key, value) if key == "Id" else "{0} eq '{1}'".format(key, value)
        resp = rest_obj.invoke_request("GET", DEVICE_URI, query_param={"$filter": param_value})
        resp_data = resp.json_data.get("value")
        rename_key = "id" if key == "Id" else "service tag"
        if not resp_data:
            module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value))
        # $filter can match loosely; confirm the first hit is an exact match.
        if key == "DeviceServiceTag" and resp_data[0]["DeviceServiceTag"] == tag:
            device_id = resp_data[0]["Id"]
        elif key == "Id" and resp_data[0]["Id"] == device_id:
            device_id = resp_data[0]["Id"]
        else:
            module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value))
    try:
        loc_resp = rest_obj.invoke_request("GET", NETWORK_SERVICE_API.format(device_id))
    except HTTPError as err:
        if err.code == 404:
            module.fail_json(msg=NETWORK_SERVICE_FAIL_MSG)
        # Some responses report CGEN1004 instead of a plain 404 when the
        # settings resource is not supported on the device.
        err_message = json.load(err)
        error_msg = err_message.get('error', {}).get('@Message.ExtendedInfo')
        if error_msg and error_msg[0].get("MessageId") == "CGEN1004":
            module.fail_json(msg=NETWORK_SERVICE_FAIL_MSG)
        # NOTE(review): any other HTTPError falls through and returns the
        # initial empty dict, which the caller cannot use -- confirm intent.
    else:
        # try/else: runs only when the GET succeeded.
        loc_resp_data = rest_obj.strip_substr_dict(loc_resp.json_data)
        payload = check_mode_validation(module, loc_resp_data, rest_obj)
        final_resp = rest_obj.invoke_request("PUT", NETWORK_SERVICE_API.format(device_id), data=payload)
    return final_resp
+
+
def main():
    """Module entry point: build the argument spec and apply the settings."""
    # Suboption specs; "enabled" is always required so a settings block can
    # never be passed without an explicit on/off intent.
    snmp_options = {"enabled": {"type": "bool", "required": True},
                    "port_number": {"type": "int", "required": False},
                    "community_name": {"type": "str", "required": False}}
    ssh_options = {"enabled": {"type": "bool", "required": True},
                   "port_number": {"type": "int", "required": False},
                   "max_sessions": {"type": "int", "required": False},
                   "max_auth_retries": {"type": "int", "required": False},
                   "idle_timeout": {"type": "float", "required": False}}
    racadm_options = {"enabled": {"type": "bool", "required": True}}
    specs = {
        "device_id": {"required": False, "type": "int"},
        "device_service_tag": {"required": False, "type": "str"},
        # community_name becomes mandatory once SNMP is enabled.
        "snmp_settings": {"type": "dict", "required": False, "options": snmp_options,
                          "required_if": [["enabled", True, ("community_name",)]]},
        "ssh_settings": {"type": "dict", "required": False, "options": ssh_options},
        "remote_racadm_settings": {"type": "dict", "required": False, "options": racadm_options},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        mutually_exclusive=[('device_id', 'device_service_tag')],
        required_one_of=[["snmp_settings", "ssh_settings", "remote_racadm_settings"]],
        supports_check_mode=True,
    )
    # NOTE(review): appears redundant with required_one_of above; presumably
    # guards sections supplied explicitly as null -- confirm.
    if not any([module.params.get("snmp_settings"), module.params.get("ssh_settings"),
                module.params.get("remote_racadm_settings")]):
        module.fail_json(msg=CONFIG_FAIL_MSG)
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            check_domain_service(module, rest_obj)
            resp = fetch_device_details(module, rest_obj)
            resp_data = resp.json_data
            # Report the idle timeout in minutes, matching the module input.
            resp_data["SshConfiguration"]["IdleTimeout"] = resp_data["SshConfiguration"]["IdleTimeout"] / 60
            module.exit_json(msg=SUCCESS_MSG, network_services_details=resp_data, changed=True)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Unreachable host is reported as unreachable, not failed, so
        # Ansible's unreachable-host semantics apply.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py
new file mode 100644
index 000000000..ec99e693a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py
@@ -0,0 +1,341 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_device_power_settings
+short_description: Configure chassis power settings on OpenManage Enterprise Modular
+description: This module allows to configure the chassis power settings on OpenManage Enterprise Modular.
+version_added: "4.2.0"
+extends_documentation_fragment:
+ - dellemc.openmanage.omem_auth_options
+options:
+ device_id:
+ type: int
+ description:
+ - The ID of the chassis for which the settings need to be updated.
+ - If the device ID is not specified, this module updates the power settings for the I(hostname).
+ - I(device_id) is mutually exclusive with I(device_service_tag).
+ device_service_tag:
+ type: str
+ description:
+ - The service tag of the chassis for which the setting needs to be updated.
+ - If the device service tag is not specified, this module updates the power settings for the I(hostname).
+ - I(device_service_tag) is mutually exclusive with I(device_id).
+ power_configuration:
+ description: The settings for Power configuration.
+ type: dict
+ suboptions:
+ enable_power_cap:
+ type: bool
+ description: Enables or disables the Power Cap Settings.
+ required: true
+ power_cap:
+ type: int
+ description:
+ - The maximum power consumption limit of the device. Specify the consumption limit in Watts.
+ - This is required if I(enable_power_cap) is set to true.
+ redundancy_configuration:
+ description: The settings for Redundancy configuration.
+ type: dict
+ suboptions:
+ redundancy_policy:
+ type: str
+ description:
+ - The choices to configure the redundancy policy.
+ - C(NO_REDUNDANCY) no redundancy policy is used.
+ - C(GRID_REDUNDANCY) to distributes power by dividing the PSUs into two grids.
+ - C(PSU_REDUNDANCY) to distribute power between all the PSUs.
+ choices: ['NO_REDUNDANCY', 'GRID_REDUNDANCY', 'PSU_REDUNDANCY']
+ default: NO_REDUNDANCY
+ hot_spare_configuration:
+ description: The settings for Hot Spare configuration.
+ type: dict
+ suboptions:
+ enable_hot_spare:
+ type: bool
+ description: Enables or disables Hot Spare configuration to facilitate voltage regulation when power
+ utilized by the Power Supply Unit (PSU) is low.
+ required: true
+ primary_grid:
+ type: str
+ description:
+ - The choices for PSU grid.
+ - C(GRID_1) Hot Spare on Grid 1.
+ - C(GRID_2) Hot Spare on Grid 2.
+ choices: ['GRID_1', 'GRID_2']
+ default: GRID_1
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Update power configuration settings of a chassis using the device ID.
+ dellemc.openmanage.ome_device_power_settings:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 25011
+ power_configuration:
+ enable_power_cap: true
+ power_cap: 3424
+
+- name: Update redundancy configuration settings of a chassis using the device service tag.
+ dellemc.openmanage.ome_device_power_settings:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: GHRT2RL
+ redundancy_configuration:
+ redundancy_policy: GRID_REDUNDANCY
+
+- name: Update hot spare configuration settings of a chassis using device ID.
+ dellemc.openmanage.ome_device_power_settings:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 25012
+ hot_spare_configuration:
+ enable_hot_spare: true
+ primary_grid: GRID_1
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall status of the device power settings.
+ returned: always
+ sample: "Successfully updated the power settings."
+power_details:
+ type: dict
+ description: returned when power settings are updated successfully.
+ returned: success
+ sample: {
+ "EnableHotSpare": true,
+ "EnablePowerCapSettings": true,
+ "MaxPowerCap": "3424",
+ "MinPowerCap": "3291",
+ "PowerCap": "3425",
+ "PrimaryGrid": "GRID_1",
+ "RedundancyPolicy": "NO_REDUNDANCY",
+ "SettingType": "Power"
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+"""
+
+
+import json
+import socket
+import copy
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
# API routes and user-facing message constants for this module.
POWER_API = "DeviceService/Devices({0})/Settings('Power')"
DEVICE_URI = "DeviceService/Devices"
DOMAIN_URI = "ManagementDomainService/Domains"
DEVICE_FAIL_MSG = "Unable to complete the operation because the entered target device {0} '{1}' is invalid."
CONFIG_FAIL_MSG = "one of the following is required: power_configuration, " \
                  "redundancy_configuration, hot_spare_configuration"
CHANGES_FOUND = "Changes found to be applied."
NO_CHANGES_FOUND = "No changes found to be applied."
SUCCESS_MSG = "Successfully updated the power settings."
FETCH_FAIL_MSG = "Failed to fetch the device information."
POWER_FAIL_MSG = "Unable to complete the operation because the power settings " \
                 "are not supported on the specified device."
# Fixed copy/paste from the device-location module: this module configures
# power settings, so the unsupported-platform message must say so.
DOMAIN_FAIL_MSG = "The device power settings operation is supported only on " \
                  "OpenManage Enterprise Modular."
+
+
def check_domain_service(module, rest_obj):
    """Abort unless the target appliance is OpenManage Enterprise Modular.

    The ManagementDomainService API exists only on OME-Modular; a standalone
    appliance rejects the probe with MessageId CGEN1006.

    :param module: AnsibleModule instance, used to fail the task.
    :param rest_obj: RestOME session used to issue the probe request.
    :return: None.
    """
    try:
        rest_obj.invoke_request("GET", DOMAIN_URI, api_timeout=5)
    except HTTPError as err:
        error_payload = json.load(err)
        message_id = error_payload["error"]["@Message.ExtendedInfo"][0]["MessageId"]
        if message_id == "CGEN1006":
            module.fail_json(msg=DOMAIN_FAIL_MSG)
+
+
def get_ip_from_host(hostname):
    """Resolve *hostname* to an IP address, returning the input on failure.

    :param hostname: host name or address string from module parameters.
    :return: resolved IP address string, or the original hostname.
    """
    result = hostname
    try:
        entries = socket.getaddrinfo(hostname, None)
        candidate = entries[-1][-1][0]
        if candidate:
            result = candidate
    except Exception:
        # Resolution failure (including socket.gaierror) is non-fatal;
        # keep the raw hostname.
        result = hostname
    return result
+
+
def get_chassis_device(module, rest_obj):
    """Find the chassis device managed at the module's target hostname.

    Resolves the hostname to an IP address and matches it against the
    PublicAddress list of each management domain; fails the module when no
    domain advertises that address.

    :param module: AnsibleModule instance.
    :param rest_obj: RestOME session object.
    :return: tuple ("Id", device_id) identifying the chassis.
    """
    ipaddress = get_ip_from_host(module.params["hostname"])
    resp = rest_obj.invoke_request("GET", DOMAIN_URI)
    for domain in resp.json_data["value"]:
        if ipaddress in domain["PublicAddress"]:
            return "Id", domain["DeviceId"]
    # No domain matched the resolved address.
    module.fail_json(msg=FETCH_FAIL_MSG)
+
+
def check_mode_validation(module, loc_data):
    """Diff requested power settings against the device's current settings.

    Exits with NO_CHANGES_FOUND / CHANGES_FOUND on the no-op and check-mode
    paths; otherwise returns the full Power PUT payload.

    :param module: AnsibleModule instance.
    :param loc_data: current Power settings returned by the appliance.
    :return: dict payload for the Power settings PUT request.
    """
    # Snapshot of the current state, restricted to the keys this module manages.
    power_data = {"PowerCap": loc_data.get("PowerCap"), "MinPowerCap": loc_data["MinPowerCap"],
                  "MaxPowerCap": loc_data["MaxPowerCap"], "RedundancyPolicy": loc_data.get("RedundancyPolicy"),
                  "EnablePowerCapSettings": loc_data["EnablePowerCapSettings"],
                  "EnableHotSpare": loc_data["EnableHotSpare"], "PrimaryGrid": loc_data.get("PrimaryGrid")}
    cloned_data = copy.deepcopy(power_data)
    if module.params.get("power_configuration") is not None:
        if module.params["power_configuration"]["enable_power_cap"] is None:
            module.fail_json(msg="missing parameter: enable_power_cap")
        enable_power_cap = module.params["power_configuration"]["enable_power_cap"]
        power_cap = module.params["power_configuration"].get("power_cap")
        if enable_power_cap is True:
            # The appliance stores the cap as a string (see MaxPowerCap in
            # the RETURN sample), hence str(power_cap).
            cloned_data.update({"EnablePowerCapSettings": enable_power_cap, "PowerCap": str(power_cap)})
        else:
            cloned_data.update({"EnablePowerCapSettings": enable_power_cap})
    if module.params.get("redundancy_configuration") is not None:
        cloned_data.update({"RedundancyPolicy": module.params["redundancy_configuration"]["redundancy_policy"]})
    if module.params.get("hot_spare_configuration") is not None:
        if module.params["hot_spare_configuration"]["enable_hot_spare"] is None:
            module.fail_json(msg="missing parameter: enable_hot_spare")
        enable_hot_spare = module.params["hot_spare_configuration"]["enable_hot_spare"]
        primary_grid = module.params["hot_spare_configuration"].get("primary_grid")
        if enable_hot_spare is True:
            cloned_data.update({"EnableHotSpare": enable_hot_spare, "PrimaryGrid": primary_grid})
        else:
            cloned_data.update({"EnableHotSpare": enable_hot_spare})
    # Symmetric difference of the item sets: truthy when any managed key
    # would change.
    power_diff = bool(set(power_data.items()) ^ set(cloned_data.items()))
    if not power_diff and not module.check_mode:
        module.exit_json(msg=NO_CHANGES_FOUND)
    elif not power_diff and module.check_mode:
        module.exit_json(msg=NO_CHANGES_FOUND)
    elif power_diff and module.check_mode:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    cloned_data.update({"SettingType": "Power"})
    return cloned_data
+
+
def fetch_device_details(module, rest_obj):
    """Resolve the target device and apply the power settings.

    Resolves the device from device_id / device_service_tag (or falls back
    to the chassis owning the module's hostname), validates it, then GETs
    the current Power settings and PUTs the merged payload.

    :param module: AnsibleModule instance.
    :param rest_obj: RestOME session object.
    :return: response object of the PUT request.
    """
    device_id, tag, final_resp = module.params.get("device_id"), module.params.get("device_service_tag"), {}
    if device_id is None and tag is None:
        # Neither identifier supplied: target the chassis that owns the
        # hostname the module is talking to.
        key, value = get_chassis_device(module, rest_obj)
        device_id = value
    else:
        key, value = ("Id", device_id) if device_id is not None else ("DeviceServiceTag", tag)
        # Numeric Id is unquoted in the OData filter; service tags are quoted.
        param_value = "{0} eq {1}".format(key, value) if key == "Id" else "{0} eq '{1}'".format(key, value)
        resp = rest_obj.invoke_request("GET", DEVICE_URI, query_param={"$filter": param_value})
        resp_data = resp.json_data.get("value")
        rename_key = "id" if key == "Id" else "service tag"
        if not resp_data:
            module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value))
        # $filter can match loosely; confirm the first hit is an exact match.
        if key == "DeviceServiceTag" and resp_data[0]["DeviceServiceTag"] == tag:
            device_id = resp_data[0]["Id"]
        elif key == "Id" and resp_data[0]["Id"] == device_id:
            device_id = resp_data[0]["Id"]
        else:
            module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value))
    try:
        loc_resp = rest_obj.invoke_request("GET", POWER_API.format(device_id))
    except HTTPError as err:
        if err.code == 404:
            module.fail_json(msg=POWER_FAIL_MSG)
        # Some responses report CGEN1004 instead of a plain 404 when the
        # settings resource is not supported on the device.
        err_message = json.load(err)
        error_msg = err_message.get('error', {}).get('@Message.ExtendedInfo')
        if error_msg and error_msg[0].get("MessageId") == "CGEN1004":
            module.fail_json(msg=POWER_FAIL_MSG)
        # NOTE(review): any other HTTPError falls through and returns the
        # initial empty dict, which the caller cannot use -- confirm intent.
    else:
        # try/else: runs only when the GET succeeded.
        payload = check_mode_validation(module, loc_resp.json_data)
        final_resp = rest_obj.invoke_request("PUT", POWER_API.format(device_id), data=payload)
    return final_resp
+
+
def main():
    """Module entry point: build the argument spec and apply power settings."""
    # Suboption specs; enable flags are required so a settings block can
    # never be passed without an explicit on/off intent.
    power_options = {"enable_power_cap": {"type": "bool", "required": True},
                     "power_cap": {"type": "int", "required": False}}
    redundancy_options = {"redundancy_policy": {"type": "str", "default": "NO_REDUNDANCY",
                                                "choices": ["NO_REDUNDANCY", "GRID_REDUNDANCY", "PSU_REDUNDANCY"]}}
    hot_spare_options = {"enable_hot_spare": {"required": True, "type": "bool"},
                         "primary_grid": {"required": False, "type": "str", "default": "GRID_1",
                                          "choices": ["GRID_1", "GRID_2"]}}
    specs = {
        "device_id": {"required": False, "type": "int"},
        "device_service_tag": {"required": False, "type": "str"},
        # power_cap becomes mandatory once the power cap is enabled.
        "power_configuration": {"type": "dict", "required": False, "options": power_options,
                                "required_if": [["enable_power_cap", True, ("power_cap",), True]]},
        "redundancy_configuration": {"type": "dict", "required": False, "options": redundancy_options},
        "hot_spare_configuration": {"type": "dict", "required": False, "options": hot_spare_options,
                                    "required_if": [["enable_hot_spare", True, ("primary_grid",)]]},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        mutually_exclusive=[('device_id', 'device_service_tag')],
        required_one_of=[["power_configuration", "redundancy_configuration", "hot_spare_configuration"]],
        supports_check_mode=True,
    )
    try:
        # NOTE(review): appears redundant with required_one_of above;
        # presumably guards sections supplied explicitly as null -- confirm.
        if not any([module.params.get("power_configuration"), module.params.get("redundancy_configuration"),
                    module.params.get("hot_spare_configuration")]):
            module.fail_json(msg=CONFIG_FAIL_MSG)
        with RestOME(module.params, req_session=True) as rest_obj:
            check_domain_service(module, rest_obj)
            resp = fetch_device_details(module, rest_obj)
            module.exit_json(msg=SUCCESS_MSG, power_details=resp.json_data, changed=True)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Unreachable host is reported as unreachable, not failed, so
        # Ansible's unreachable-host semantics apply.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py
new file mode 100644
index 000000000..183b7f67e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py
@@ -0,0 +1,674 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_device_quick_deploy
+short_description: Configure Quick Deploy settings on OpenManage Enterprise Modular.
+description: This module allows to configure the Quick Deploy settings of the server or IOM
+ on OpenManage Enterprise Modular.
+version_added: "5.0.0"
+extends_documentation_fragment:
+ - dellemc.openmanage.omem_auth_options
+options:
+ device_id:
+ type: int
+ description:
+ - The ID of the chassis for which the Quick Deploy settings to be deployed.
+ - If the device ID is not specified, this module updates the Quick Deploy settings for the I(hostname).
+ - I(device_id) is mutually exclusive with I(device_service_tag).
+ device_service_tag:
+ type: str
+ description:
+ - The service tag of the chassis for which the Quick Deploy settings to be deployed.
+ - If the device service tag is not specified, this module updates the Quick Deploy settings for the I(hostname).
+ - I(device_service_tag) is mutually exclusive with I(device_id).
+ setting_type:
+ type: str
+ required: True
+ choices: [ServerQuickDeploy, IOMQuickDeploy]
+ description:
+ - The type of the Quick Deploy settings to be applied.
+ - C(ServerQuickDeploy) to apply the server Quick Deploy settings.
+ - C(IOMQuickDeploy) to apply the IOM Quick Deploy settings.
+ job_wait:
+ type: bool
+ description: Determines whether to wait for the job completion or not.
+ default: True
+ job_wait_timeout:
+ type: int
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(True).
+ default: 120
+ quick_deploy_options:
+ type: dict
+ required: True
+ description: The Quick Deploy settings for server and IOM quick deploy.
+ suboptions:
+ password:
+ type: str
+ description:
+ - The password to login to the server or IOM.
+ - The module will always report change when I(password) option is added.
+ ipv4_enabled:
+ type: bool
+ description: Enables or disables the IPv4 network.
+ ipv4_network_type:
+ type: str
+ choices: [Static, DHCP]
+ description:
+ - IPv4 network type.
+ - I(ipv4_network_type) is required if I(ipv4_enabled) is C(True).
+ - C(Static) to configure the static IP settings.
+ - C(DHCP) to configure the Dynamic IP settings.
+ ipv4_subnet_mask:
+ type: str
+ description:
+ - IPv4 subnet mask.
+ - I(ipv4_subnet_mask) is required if I(ipv4_network_type) is C(Static).
+ ipv4_gateway:
+ type: str
+ description:
+ - IPv4 gateway.
+ - I(ipv4_gateway) is required if I(ipv4_network_type) is C(Static).
+ ipv6_enabled:
+ type: bool
+ description: Enables or disables the IPv6 network.
+ ipv6_network_type:
+ type: str
+ choices: [Static, DHCP]
+ description:
+ - IPv6 network type.
+ - I(ipv6_network_type) is required if I(ipv6_enabled) is C(True).
+ - C(Static) to configure the static IP settings.
+ - C(DHCP) to configure the Dynamic IP settings.
+ ipv6_prefix_length:
+ type: int
+ description:
+ - IPV6 prefix length.
+ - I(ipv6_prefix_length) is required if I(ipv6_network_type) is C(Static).
+ ipv6_gateway:
+ type: str
+ description:
+ - IPv6 gateway.
+ - I(ipv6_gateway) is required if I(ipv6_network_type) is C(Static).
+ slots:
+ type: list
+ elements: dict
+ description: The slot configuration for the server or IOM.
+ suboptions:
+ slot_id:
+ type: int
+ required: True
+ description: The ID of the slot.
+ slot_ipv4_address:
+ type: str
+ description: The IPv4 address of the slot.
+ slot_ipv6_address:
+ type: str
+ description: The IPv6 address of the slot.
+ vlan_id:
+ type: int
+ description: The ID of the VLAN.
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+ - The module will always report change when I(password) option is added.
+ - If the chassis is a member of a multi-chassis group and it is assigned as a backup
+ lead chassis, the operations performed on the chassis using this module may
+ conflict with the management operations performed on the chassis through the lead chassis.
+"""
+
+EXAMPLES = """
+---
+- name: Configure server Quick Deploy settings of the chassis using device ID.
+ dellemc.openmanage.ome_device_quick_deploy:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ device_id: 25011
+ setting_type: ServerQuickDeploy
+ ca_path: "/path/to/ca_cert.pem"
+ quick_deploy_options:
+ password: "password"
+ ipv4_enabled: True
+ ipv4_network_type: Static
+ ipv4_subnet_mask: 255.255.255.0
+ ipv4_gateway: 192.168.0.1
+ ipv6_enabled: True
+ ipv6_network_type: Static
+ ipv6_prefix_length: 1
+ ipv6_gateway: "::"
+ slots:
+ - slot_id: 1
+ slot_ipv4_address: 192.168.0.2
+ slot_ipv6_address: "::"
+ vlan_id: 1
+ - slot_id: 2
+ slot_ipv4_address: 192.168.0.3
+ slot_ipv6_address: "::"
+ vlan_id: 2
+
+- name: Configure server Quick Deploy settings of the chassis using device service tag.
+ dellemc.openmanage.ome_device_quick_deploy:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ device_service_tag: GHRT2RL
+ setting_type: IOMQuickDeploy
+ ca_path: "/path/to/ca_cert.pem"
+ quick_deploy_options:
+ password: "password"
+ ipv4_enabled: True
+ ipv4_network_type: Static
+ ipv4_subnet_mask: 255.255.255.0
+ ipv4_gateway: 192.168.0.1
+ ipv6_enabled: True
+ ipv6_network_type: Static
+ ipv6_prefix_length: 1
+ ipv6_gateway: "::"
+ slots:
+ - slot_id: 1
+ slot_ipv4_address: 192.168.0.2
+ slot_ipv6_address: "::"
+ vlan_id: 1
+ - slot_id: 2
+ slot_ipv4_address: 192.168.0.3
+ slot_ipv6_address: "::"
+ vlan_id: 2
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall status of the device quick deploy settings.
+ returned: always
+ sample: "Successfully deployed the Quick Deploy settings."
+job_id:
+ type: int
+ description: The job ID of the submitted quick deploy job.
+ returned: when quick deploy job is submitted.
+ sample: 1234
+quick_deploy_settings:
+ type: dict
+ description: returned when quick deploy settings are deployed successfully.
+ returned: success
+ sample: {
+ "DeviceId": 25011,
+ "SettingType": "ServerQuickDeploy",
+ "ProtocolTypeV4": true,
+ "NetworkTypeV4": "Static",
+ "IpV4Gateway": "192.168.0.1",
+ "IpV4SubnetMask": "255.255.255.0",
+ "ProtocolTypeV6": true,
+ "NetworkTypeV6": "Static",
+ "PrefixLength": "2",
+ "IpV6Gateway": "::",
+ "slots": [
+ {
+ "DeviceId": 25011,
+ "DeviceCapabilities": [18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 41, 8, 7, 4, 3, 2, 1, 31, 30],
+ "DeviceIPV4Address": "192.168.0.2",
+ "DeviceIPV6Address": "::",
+ "Dhcpipv4": "Disabled",
+ "Dhcpipv6": "Disabled",
+ "Ipv4Enabled": "Enabled",
+ "Ipv6Enabled": "Enabled",
+ "Model": "PowerEdge MX840c",
+ "SlotIPV4Address": "192.168.0.2",
+ "SlotIPV6Address": "::",
+ "SlotId": 1,
+ "SlotSelected": true,
+ "SlotSettingsApplied": true,
+ "SlotType": "2000",
+ "Type": "1000",
+ "VlanId": "1"
+ },
+ {
+ "DeviceId": 0,
+ "Model": "",
+ "SlotIPV4Address": "0.0.0.0",
+ "SlotIPV6Address": "::",
+ "SlotId": 2,
+ "SlotSelected": false,
+ "SlotSettingsApplied": false,
+ "SlotType": "2000",
+ "Type": "0"
+ },
+ {
+ "DeviceId": 0,
+ "Model": "",
+ "SlotIPV4Address": "0.0.0.0",
+ "SlotIPV6Address": "::",
+ "SlotId": 3,
+ "SlotSelected": false,
+ "SlotSettingsApplied": false,
+ "SlotType": "2000",
+ "Type": "0"
+ },
+ {
+ "DeviceId": 0,
+ "Model": "",
+ "SlotIPV4Address": "0.0.0.0",
+ "SlotIPV6Address": "::",
+ "SlotId": 4,
+ "SlotSelected": false,
+ "SlotSettingsApplied": false,
+ "SlotType": "2000",
+ "Type": "0"
+ },
+ {
+ "DeviceId": 0,
+ "Model": "",
+ "SlotIPV4Address": "0.0.0.0",
+ "SlotIPV6Address": "::",
+ "SlotId": 5,
+ "SlotSelected": false,
+ "SlotSettingsApplied": false,
+ "SlotType": "2000",
+ "Type": "0"
+ },
+ {
+ "DeviceId": 0,
+ "Model": "",
+ "SlotIPV4Address": "0.0.0.0",
+ "SlotIPV6Address": "::",
+ "SlotId": 6,
+ "SlotSelected": false,
+ "SlotSettingsApplied": false,
+ "SlotType": "2000",
+ "Type": "0"
+ },
+ {
+ "DeviceId": 0,
+ "Model": "",
+ "SlotIPV4Address": "0.0.0.0",
+ "SlotIPV6Address": "::",
+ "SlotId": 7,
+ "SlotSelected": false,
+ "SlotSettingsApplied": false,
+ "SlotType": "2000",
+ "Type": "0"
+ },
+ {
+ "DeviceId": 0,
+ "Model": "",
+ "SlotIPV4Address": "0.0.0.0",
+ "SlotIPV6Address": "::",
+ "SlotId": 8,
+ "SlotSelected": false,
+ "SlotSettingsApplied": false,
+ "SlotType": "2000",
+ "Type": "0"
+ }
+ ]
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+"""
+
+
+import copy
+import json
+import socket
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+
+DOMAIN_URI = "ManagementDomainService/Domains"
+DEVICE_URI = "DeviceService/Devices"
+QUICK_DEPLOY_API = "DeviceService/Devices({0})/Settings('{1}')"
+
+DOMAIN_FAIL_MSG = "The operation to configure the Quick Deploy settings is supported only on " \
+ "OpenManage Enterprise Modular."
+IP_FAIL_MSG = "Invalid '{0}' address provided for the {1}."
+FETCH_FAIL_MSG = "Unable to retrieve the device information."
+DEVICE_FAIL_MSG = "Unable to complete the operation because the entered target device {0} '{1}' is invalid."
+QUICK_DEPLOY_FAIL_MSG = "Unable to complete the operation because the {0} configuration settings " \
+ "are not supported on the specified device."
+INVALID_SLOT_MSG = "Unable to complete the operation because the entered slot(s) '{0}' does not exist."
+CHANGES_FOUND = "Changes found to be applied."
+NO_CHANGES_FOUND = "No changes found to be applied."
+SUCCESS_MSG = "Successfully deployed the Quick Deploy settings."
+FAIL_MSG = "Unable to deploy the Quick Deploy settings."
+QUICK_DEPLOY_JOB_DESC = "The Quick Deploy job is initiated from the OpenManage Ansible Module collections."
+JOB_MSG = "Successfully submitted the Quick Deploy job settings."
+
+
def validate_ip_address(address, flag):
    """Return True when *address* parses as the requested IP family.

    :param address: address string to validate.
    :param flag: "IPV4" to validate as IPv4; any other value validates as IPv6.
    :return: True when the address is valid for the family, False otherwise.
    """
    try:
        if flag == "IPV4":
            # inet_aton also accepts short forms such as "1.2.3", so insist
            # on full dotted-quad notation (exactly three dots).
            socket.inet_aton(address)
            return address.count('.') == 3
        socket.inet_pton(socket.AF_INET6, address)
    except socket.error:
        return False
    return True
+
+
def ip_address_field(module, field, deploy_options, slot=False):
    """Validate the IP-address options present in *deploy_options*.

    Fails the module with IP_FAIL_MSG on the first invalid address.

    :param module: AnsibleModule instance, used to fail on an invalid address.
    :param field: list of (option_name, family) tuples, family "IPV4"/"IPV6".
    :param deploy_options: dict of option values (the quick-deploy options or
        a single slot entry).
    :param slot: retained for backward compatibility; the original code
        assigned the same dict in both branches, so the flag never changed
        behavior and is ignored here.
    """
    for name, family in field:
        field_value = deploy_options.get(name)
        # Only validate options the user actually supplied; reuse the value
        # already fetched instead of the original's second dict lookup.
        if field_value is not None and not validate_ip_address(field_value, family):
            module.fail_json(msg=IP_FAIL_MSG.format(field_value, name))
+
+
def check_domain_service(module, rest_obj):
    """Fail unless the target appears to be an OpenManage Enterprise Modular.

    A plain OpenManage Enterprise instance rejects the management-domain URI
    with extended-info code CGEN1006; in that case the module fails with
    DOMAIN_FAIL_MSG. Any other HTTP error is ignored here and will surface
    through later requests.
    """
    try:
        rest_obj.invoke_request("GET", DOMAIN_URI, api_timeout=5)
    except HTTPError as err:
        message_id = json.load(err)["error"]["@Message.ExtendedInfo"][0]["MessageId"]
        if message_id == "CGEN1006":
            module.fail_json(msg=DOMAIN_FAIL_MSG)
+
+
def get_ip_from_host(hostname):
    """Best-effort resolution of *hostname* to an IP address.

    Returns the address from the last getaddrinfo() record; on any resolution
    failure, or when the resolved address is empty, the original hostname is
    returned unchanged.
    """
    try:
        records = socket.getaddrinfo(hostname, None)
        resolved = records[-1][-1][0]
    except Exception:
        # Resolution problems (including socket.gaierror) fall back to the
        # caller-supplied hostname; this lookup is purely best-effort.
        return hostname
    return resolved if resolved else hostname
+
+
def get_chassis_device(module, rest_obj):
    """Locate the chassis device managed at the module's I(hostname).

    Resolves the hostname to an IP address, scans the management domains for
    a record whose PublicAddress list contains it, and returns the
    ("Id", device_id) filter pair. Fails the module with FETCH_FAIL_MSG when
    no domain record matches.
    """
    key, value = None, None
    ipaddress = get_ip_from_host(module.params["hostname"])
    resp = rest_obj.invoke_request("GET", DOMAIN_URI)
    # Lazy scan: stop at the first domain advertising our address.
    match = next((d for d in resp.json_data["value"] if ipaddress in d["PublicAddress"]), None)
    if match is None:
        module.fail_json(msg=FETCH_FAIL_MSG)
    else:
        key, value = "Id", match["DeviceId"]
    return key, value
+
+
def check_mode_validation(module, deploy_data):
    """Diff the requested Quick Deploy options against the device state.

    Builds the request payload from I(quick_deploy_options), compares it with
    the settings currently on the device (*deploy_data*), and exits early with
    CHANGES_FOUND / NO_CHANGES_FOUND in check mode or when the device already
    matches the request. Otherwise returns the merged network payload and the
    list of user-updated slot payloads.

    :param module: AnsibleModule instance (check_mode and params are read).
    :param deploy_data: current Quick Deploy settings fetched from the device.
    :return: tuple (request payload dict, list of slot payload dicts).
    """
    deploy_options = module.params.get("quick_deploy_options")
    req_data, req_payload = {}, {}
    # A password can never be read back from the device, so supplying one
    # always produces a change (documented module behavior).
    if deploy_options.get("password") is not None:
        req_data["rootCredential"] = deploy_options.get("password")
    ipv4_enabled = deploy_options.get("ipv4_enabled")
    ipv4_enabled_deploy = deploy_data["ProtocolTypeV4"]
    ipv6_enabled_deploy = deploy_data["ProtocolTypeV6"]
    ipv4_nt_deploy = deploy_data.get("NetworkTypeV4")
    ipv6_nt_deploy = deploy_data.get("NetworkTypeV6")
    # IPv4 section: considered when the user enables IPv4 or it is already
    # enabled on the device. Boolean values are serialized as "true"/"false".
    if ipv4_enabled is not None and ipv4_enabled is True or \
            ipv4_enabled_deploy is not None and ipv4_enabled_deploy is True:
        req_data["ProtocolTypeV4"] = None
        if ipv4_enabled is not None:
            req_data["ProtocolTypeV4"] = str(ipv4_enabled).lower()
        ipv4_network_type = deploy_options.get("ipv4_network_type")
        req_data["NetworkTypeV4"] = ipv4_network_type
        # Static addressing (requested or already configured) needs mask/gateway.
        if ipv4_network_type == "Static" or ipv4_nt_deploy is not None and ipv4_nt_deploy == "Static":
            req_data["IpV4SubnetMask"] = deploy_options.get("ipv4_subnet_mask")
            req_data["IpV4Gateway"] = deploy_options.get("ipv4_gateway")
    elif ipv4_enabled is not None and ipv4_enabled is False:
        req_data["ProtocolTypeV4"] = str(ipv4_enabled).lower()
    ipv6_enabled = deploy_options.get("ipv6_enabled")
    # IPv6 section mirrors the IPv4 logic above.
    if ipv6_enabled is not None and ipv6_enabled is True or \
            ipv6_enabled_deploy is not None and ipv6_enabled_deploy is True:
        req_data["ProtocolTypeV6"] = None
        if ipv6_enabled is not None:
            req_data["ProtocolTypeV6"] = str(ipv6_enabled).lower()
        ipv6_network_type = deploy_options.get("ipv6_network_type")
        req_data["NetworkTypeV6"] = ipv6_network_type
        if ipv6_network_type == "Static" or ipv6_nt_deploy is not None and ipv6_nt_deploy == "Static":
            # Prefix length is compared as a string, matching the device format.
            req_data["PrefixLength"] = deploy_options.get("ipv6_prefix_length")
            if deploy_options.get("ipv6_prefix_length") is not None:
                req_data["PrefixLength"] = str(deploy_options.get("ipv6_prefix_length"))
            req_data["IpV6Gateway"] = deploy_options.get("ipv6_gateway")
    elif ipv6_enabled is not None and ipv6_enabled is False:
        req_data["ProtocolTypeV6"] = str(ipv6_enabled).lower()
    # Snapshot of the device's current settings in request-payload key names.
    resp_data = {
        "ProtocolTypeV4": str(ipv4_enabled_deploy).lower(), "NetworkTypeV4": deploy_data.get("NetworkTypeV4"),
        "IpV4SubnetMask": deploy_data.get("IpV4SubnetMask"), "IpV4Gateway": deploy_data.get("IpV4Gateway"),
        "ProtocolTypeV6": str(ipv6_enabled_deploy).lower(), "NetworkTypeV6": deploy_data.get("NetworkTypeV6"),
        "PrefixLength": deploy_data.get("PrefixLength"), "IpV6Gateway": deploy_data.get("IpV6Gateway")}
    # Drop unset entries on both sides, then detect any symmetric difference.
    resp_filter_data = dict([(k, v) for k, v in resp_data.items() if v is not None])
    req_data_filter = dict([(k, v) for k, v in req_data.items() if v is not None])
    diff_changes = [bool(set(resp_filter_data.items()) ^ set(req_data_filter.items()))]
    req_slot_payload, invalid_slot = [], []
    slots = deploy_options.get("slots")
    if slots is not None:
        exist_slot = deploy_data.get("Slots")
        for each in slots:
            # Match the requested slot against the slots present on the device.
            exist_filter_slot = list(filter(lambda d: d["SlotId"] in [each["slot_id"]], exist_slot))
            if exist_filter_slot:
                req_slot_1 = {"SlotId": each["slot_id"], "SlotIPV4Address": each.get("slot_ipv4_address"),
                              "SlotIPV6Address": each.get("slot_ipv6_address"), "VlanId": each.get("vlan_id")}
                # VLAN IDs are compared as strings, matching the device format.
                if each.get("vlan_id") is not None:
                    req_slot_1.update({"VlanId": str(each.get("vlan_id"))})
                req_filter_slot = dict([(k, v) for k, v in req_slot_1.items() if v is not None])
                exist_slot_1 = {"SlotId": exist_filter_slot[0]["SlotId"],
                                "SlotIPV4Address": exist_filter_slot[0]["SlotIPV4Address"],
                                "SlotIPV6Address": exist_filter_slot[0]["SlotIPV6Address"],
                                "VlanId": exist_filter_slot[0]["VlanId"]}
                exist_filter_slot = dict([(k, v) for k, v in exist_slot_1.items() if v is not None])
                # Overlay the request on a copy of the current slot state; any
                # symmetric difference against the original means a change.
                cp_exist_filter_slot = copy.deepcopy(exist_filter_slot)
                cp_exist_filter_slot.update(req_filter_slot)
                diff_changes.append(bool(set(cp_exist_filter_slot.items()) ^ set(exist_filter_slot.items())))
                req_slot_payload.append(cp_exist_filter_slot)
            else:
                invalid_slot.append(each["slot_id"])
        if invalid_slot:
            module.fail_json(msg=INVALID_SLOT_MSG.format(", ".join(map(str, invalid_slot))))
    # Exit early in check mode, or whenever nothing would actually change.
    if module.check_mode and any(diff_changes) is True:
        module.exit_json(msg=CHANGES_FOUND, changed=True, quick_deploy_settings=deploy_data)
    elif (module.check_mode and any(diff_changes) is False) or \
            (not module.check_mode and any(diff_changes) is False):
        module.exit_json(msg=NO_CHANGES_FOUND, quick_deploy_settings=deploy_data)
    # Final payload: current device values overridden by the requested ones.
    req_payload.update(resp_filter_data)
    req_payload.update(req_data_filter)
    return req_payload, req_slot_payload
+
+
def job_payload_submission(rest_obj, payload, slot_payload, settings_type, device_id, resp_data):
    """Submit the Quick Deploy job and return its job ID.

    Translates the merged settings *payload* and the user-updated slot entries
    (*slot_payload*) into the flat Key/Value job parameters expected by the
    OME job service, carries over the untouched slots from the current device
    settings (*resp_data*), and submits the job.

    :param rest_obj: active RestOME session.
    :param payload: merged network/credential settings to apply.
    :param slot_payload: slot dicts the user asked to change.
    :param settings_type: "ServerQuickDeploy" or "IOMQuickDeploy".
    :param device_id: target device ID.
    :param resp_data: current Quick Deploy settings (provides "Slots").
    :return: ID of the submitted job.
    """
    job_params = [{"Key": "protocolTypeV4", "Value": payload["ProtocolTypeV4"]},
                  {"Key": "protocolTypeV6", "Value": payload["ProtocolTypeV6"]}]
    s_type = "SERVER_QUICK_DEPLOY" if settings_type == "ServerQuickDeploy" else "IOM_QUICK_DEPLOY"
    job_params.append({"Key": "operationName", "Value": "{0}".format(s_type)})
    job_params.append({"Key": "deviceId", "Value": "{0}".format(device_id)})
    # Optional settings are added only when present in the merged payload,
    # in the same order as before: credential, IPv4, then IPv6 parameters.
    optional_params = [("rootCredential", "rootCredential"), ("NetworkTypeV4", "networkTypeV4"),
                       ("IpV4SubnetMask", "subnetMaskV4"), ("IpV4Gateway", "gatewayV4"),
                       ("NetworkTypeV6", "networkTypeV6"), ("PrefixLength", "prefixLength"),
                       ("IpV6Gateway", "gatewayV6")]
    for src_key, job_key in optional_params:
        if payload.get(src_key) is not None:
            job_params.append({"Key": job_key, "Value": payload[src_key]})
    updated_slot = []
    for each in slot_payload:
        updated_slot.append(each.get("SlotId"))
        job_params.append(
            {"Key": "slotId={0}".format(each.get("SlotId")),
             "Value": "SlotSelected=true;IPV4Address={0};IPV6Address={1};VlanId={2}".format(
                 each.get("SlotIPV4Address"), each.get("SlotIPV6Address"), each.get("VlanId"))})
    # Carry over the slots the user did not touch, so the job does not wipe them.
    slots = resp_data["Slots"]
    if updated_slot:
        slots = list(filter(lambda d: d["SlotId"] not in updated_slot, slots))
    for each in slots:
        # FIX: use the same "slotId=<n>" key format as the user-updated slots
        # above; the previous "slot_id=<n>" spelling was inconsistent with the
        # rest of the job parameters.
        key = "slotId={0}".format(each["SlotId"])
        value = "SlotSelected={0};".format(each["SlotSelected"])
        if each.get("SlotIPV4Address") is not None:
            value = value + "IPV4Address={0};".format(each["SlotIPV4Address"])
        if each.get("SlotIPV6Address") is not None:
            value = value + "IPV6Address={0};".format(each["SlotIPV6Address"])
        if each.get("VlanId") is not None:
            value = value + "VlanId={0}".format(each["VlanId"])
        job_params.append({"Key": key, "Value": value})
    job_sub_resp = rest_obj.job_submission("Quick Deploy", QUICK_DEPLOY_JOB_DESC, [], job_params,
                                           {"Id": 42, "Name": "QuickDeploy_Task"})
    return job_sub_resp.json_data.get('Id')
+
+
def get_device_details(rest_obj, module):
    """Resolve the target device and apply the Quick Deploy settings.

    Determines the device ID from I(device_id)/I(device_service_tag) (or from
    the chassis at I(hostname) when neither is given), fetches the current
    Quick Deploy settings, validates the requested changes, submits the job,
    and optionally waits for it to finish.

    :param rest_obj: active RestOME session.
    :param module: AnsibleModule instance.
    :return: tuple (job ID or None, post-job settings dict or None).
    """
    job_success_data, job_id = None, None
    device_id, tag = module.params.get("device_id"), module.params.get("device_service_tag")
    if device_id is None and tag is None:
        # Neither target given: fall back to the chassis at I(hostname).
        key, value = get_chassis_device(module, rest_obj)
        device_id = value
    else:
        key, value = ("Id", device_id) if device_id is not None else ("DeviceServiceTag", tag)
    # OData filter: numeric IDs are unquoted, service tags are quoted.
    param_value = "{0} eq {1}".format(key, value) if key == "Id" else "{0} eq '{1}'".format(key, value)
    resp = rest_obj.invoke_request("GET", DEVICE_URI, query_param={"$filter": param_value})
    resp_data = resp.json_data.get("value")
    rename_key = "id" if key == "Id" else "service tag"
    if not resp_data:
        module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value))
    # Confirm the filtered record really matches the requested identifier.
    if key == "DeviceServiceTag" and resp_data[0]["DeviceServiceTag"] == tag:
        device_id = resp_data[0]["Id"]
    elif key == "Id" and resp_data[0]["Id"] == device_id:
        device_id = resp_data[0]["Id"]
    else:
        module.fail_json(msg=DEVICE_FAIL_MSG.format(rename_key, value))
    settings_type, settings_key = "IOMQuickDeploy", "IOM Quick Deploy"
    if module.params["setting_type"] == "ServerQuickDeploy":
        settings_type, settings_key = "ServerQuickDeploy", "Server Quick Deploy"
    try:
        deploy_resp = rest_obj.invoke_request("GET", QUICK_DEPLOY_API.format(device_id, settings_type))
    except HTTPError as err:
        # CGEN1004 means this device does not support the requested settings.
        # NOTE(review): any other HTTPError is swallowed here and the function
        # falls through to return (None, None) — confirm that is intentional.
        err_message = json.load(err)
        error_msg = err_message.get('error', {}).get('@Message.ExtendedInfo')
        if error_msg and error_msg[0].get("MessageId") == "CGEN1004":
            module.fail_json(msg=QUICK_DEPLOY_FAIL_MSG.format(settings_key))
    else:
        resp_data = rest_obj.strip_substr_dict(deploy_resp.json_data)
        # check_mode_validation exits early when there is nothing to change.
        payload, slot_payload = check_mode_validation(module, resp_data)
        job_id = job_payload_submission(rest_obj, payload, slot_payload, settings_type, device_id, resp_data)
        if module.params["job_wait"]:
            job_failed, job_msg = rest_obj.job_tracking(job_id, job_wait_sec=module.params["job_wait_timeout"])
            if job_failed is True:
                module.fail_json(msg=FAIL_MSG)
            # Re-read the settings so the caller can report the applied state.
            job_success_resp = rest_obj.invoke_request("GET", QUICK_DEPLOY_API.format(device_id, settings_type))
            job_success_data = rest_obj.strip_substr_dict(job_success_resp.json_data)
    return job_id, job_success_data
+
+
def main():
    """Entry point: build the argument spec, validate addresses, run the flow."""
    slots = {
        "slot_id": {"required": True, "type": "int"},
        "slot_ipv4_address": {"type": "str"},
        "slot_ipv6_address": {"type": "str"},
        "vlan_id": {"type": "int"},
    }
    quick_deploy = {
        "password": {"type": "str", "no_log": True},
        "ipv4_enabled": {"type": "bool"},
        "ipv4_network_type": {"type": "str", "choices": ["Static", "DHCP"]},
        "ipv4_subnet_mask": {"type": "str"},
        "ipv4_gateway": {"type": "str"},
        "ipv6_enabled": {"type": "bool"},
        "ipv6_network_type": {"type": "str", "choices": ["Static", "DHCP"]},
        "ipv6_prefix_length": {"type": "int"},
        "ipv6_gateway": {"type": "str"},
        "slots": {"type": "list", "elements": "dict", "options": slots},
    }
    specs = {
        "device_id": {"required": False, "type": "int"},
        "device_service_tag": {"required": False, "type": "str"},
        # "type": "str" added for consistency with the rest of the spec
        # (AnsibleModule defaults to str, so behavior is unchanged).
        "setting_type": {"required": True, "type": "str",
                         "choices": ["ServerQuickDeploy", "IOMQuickDeploy"]},
        "quick_deploy_options": {
            "type": "dict", "required": True, "options": quick_deploy,
            "required_if": [
                ["ipv4_enabled", True, ["ipv4_network_type"]],
                ["ipv4_network_type", "Static", ["ipv4_subnet_mask", "ipv4_gateway"]],
                ["ipv6_enabled", True, ["ipv6_network_type"]],
                ["ipv6_network_type", "Static", ["ipv6_prefix_length", "ipv6_gateway"]],
            ],
        },
        "job_wait": {"type": "bool", "default": True},
        "job_wait_timeout": {"type": "int", "default": 120},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(argument_spec=specs,
                           mutually_exclusive=[('device_id', 'device_service_tag')],
                           supports_check_mode=True,)
    # Defensive guard; the argument spec already enforces required=True.
    if module.params["quick_deploy_options"] is None:
        module.fail_json(msg="missing required arguments: quick_deploy_options")
    # Validate top-level addresses, then each slot's addresses.
    fields = [("ipv4_subnet_mask", "IPV4"), ("ipv4_gateway", "IPV4"), ("ipv6_gateway", "IPV6")]
    ip_address_field(module, fields, module.params["quick_deploy_options"], slot=False)
    slot_options = module.params["quick_deploy_options"].get("slots")
    if slot_options is not None:
        slot_field = [("slot_ipv4_address", "IPV4"), ("slot_ipv6_address", "IPV6")]
        for dep_opt in slot_options:
            ip_address_field(module, slot_field, dep_opt, slot=True)
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            check_domain_service(module, rest_obj)
            job_id, data = get_device_details(rest_obj, module)
            # Settings data is only available when the job was waited on.
            if job_id is not None and data is not None:
                module.exit_json(msg=SUCCESS_MSG, job_id=job_id, quick_deploy_settings=data, changed=True)
            module.exit_json(msg=JOB_MSG, job_id=job_id)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError,
            AttributeError, IndexError, KeyError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py
new file mode 100644
index 000000000..954395280
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py
@@ -0,0 +1,445 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 6.1.0
+# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_devices
+short_description: Perform device-specific operations on target devices
+description: Perform device-specific operations such as refresh inventory, clear iDRAC job queue, and reset iDRAC from OpenManage Enterprise.
+version_added: 6.1.0
+author:
+ - Jagadeesh N V(@jagadeeshnv)
+extends_documentation_fragment:
+ - dellemc.openmanage.oment_auth_options
+options:
+ device_service_tags:
+ description:
+ - Service tag of the target devices.
+ - This is mutually exclusive with I(device_ids).
+ type: list
+ elements: str
+ device_ids:
+ description:
+ - IDs of the target devices.
+ - This is mutually exclusive with I(device_service_tags).
+ type: list
+ elements: int
+ state:
+ description:
+ - C(present) Allows to perform the I(device_action) on the target devices.
+ - "C(absent) Removes the device from OpenManage Enterprise. Job is not triggered. I(job_wait), I(job_schedule),
+ I(job_name), and I(job_description) are not applicable to this operation."
+ type: str
+ choices: [present, absent]
+ default: present
+ device_action:
+ description:
+ - C(refresh_inventory) refreshes the inventory on the target devices.
+ - C(reset_idrac) Triggers a reset on the target iDRACs.
+ - C(clear_idrac_job_queue) Clears the job queue on the target iDRACs.
+ - A job is triggered for each action.
+ type: str
+ choices: [refresh_inventory, reset_idrac, clear_idrac_job_queue]
+ default: refresh_inventory
+ job_wait:
+ description:
+ - Provides an option to wait for the job completion.
+ - This option is applicable when I(state) is C(present).
+ - This is applicable when I(job_schedule) is C(startnow).
+ type: bool
+ default: true
+ job_wait_timeout:
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(True).
+ type: int
+ default: 1200
+ job_schedule:
+ description: Provide the cron string to schedule the job.
+ type: str
+ default: startnow
+ job_name:
+ description: Optional name for the job.
+ type: str
+ job_description:
+ description: Optional description for the job.
+ type: str
+requirements:
+ - "python >= 3.8.6"
+notes:
+ - For C(reset_idrac), the job triggers only the iDRAC reset operation and does not track the complete reset cycle.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Refresh Inventory
+ dellemc.openmanage.ome_devices:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_action: refresh_inventory
+ device_service_tags:
+ - SVCTAG1
+
+- name: Clear iDRAC job queue
+ dellemc.openmanage.ome_devices:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_action: clear_idrac_job_queue
+ device_service_tags:
+ - SVCTAG1
+
+- name: Reset iDRAC using the service tag
+ dellemc.openmanage.ome_devices:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_action: reset_idrac
+ device_service_tags:
+ - SVCTAG1
+
+- name: Remove devices using servicetags
+ dellemc.openmanage.ome_devices:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: absent
+ device_service_tags:
+ - SVCTAG1
+ - SVCTAF2
+
+- name: Remove devices using IDs
+ dellemc.openmanage.ome_devices:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: absent
+ device_ids:
+ - 10235
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall status of the devices operation.
+ returned: always
+ sample: "Successfully removed the device(s)."
+job:
+ type: dict
+ description: Job details of the devices operation.
+ returned: success
+ sample: {
+ "Id": 14874,
+ "JobName": "Refresh inventory",
+ "JobDescription": "The Refresh inventory task initiated from OpenManage Ansible Modules for devices with the ids '13216'.",
+ "Schedule": "startnow",
+ "State": "Enabled",
+ "CreatedBy": "admin",
+ "UpdatedBy": null,
+ "Visible": true,
+ "Editable": true,
+ "Builtin": false,
+ "UserGenerated": true,
+ "Targets": [
+ {
+ "JobId": 14874,
+ "Id": 13216,
+ "Data": "",
+ "TargetType": {
+ "Id": 1000,
+ "Name": "DEVICE"
+ }
+ }
+ ],
+ "Params": [
+ {
+ "JobId": 14874,
+ "Key": "action",
+ "Value": "CONFIG_INVENTORY"
+ },
+ {
+ "JobId": 14874,
+ "Key": "isCollectDriverInventory",
+ "Value": "true"
+ }
+ ],
+ "LastRunStatus": {
+ "@odata.type": "#JobService.JobStatus",
+ "Id": 2060,
+ "Name": "Completed"
+ },
+ "JobType": {
+ "@odata.type": "#JobService.JobType",
+ "Id": 8,
+ "Name": "Inventory_Task",
+ "Internal": false
+ },
+ "JobStatus": {
+ "@odata.type": "#JobService.JobStatus",
+ "Id": 2020,
+ "Name": "Scheduled"
+ },
+ "ExecutionHistories@odata.navigationLink": "/api/JobService/Jobs(14874)/ExecutionHistories",
+ "LastExecutionDetail": {
+ "@odata.id": "/api/JobService/Jobs(14874)/LastExecutionDetail"
+ }
+}
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1002",
+ "RelatedProperties": [],
+ "Message": "Unable to complete the operation because the requested URI is invalid.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Enter a valid URI and retry the operation."
+ }
+ ]
+ }
+}
+"""
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import \
+ get_rest_items, strip_substr_dict, job_tracking, apply_diff_key
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import CHANGES_MSG, NO_CHANGES_MSG
+
# REST endpoints used by this module (relative to the OME API base URI).
DEVICE_URI = "DeviceService/Devices"
JOBS_URI = "JobService/Jobs"
JOB_URI = "JobService/Jobs({job_id})"
RUN_JOB_URI = "JobService/Actions/JobService.RunJobs"
LAST_EXEC = "JobService/Jobs({job_id})/LastExecutionDetail"
DELETE_DEVICES_URI = "DeviceService/Actions/DeviceService.RemoveDevices"

# User-facing messages.  Fixed typo: "devices(s)" -> "device(s)".
DELETE_SUCCESS = "The device(s) are removed successfully."
INVALID_DEV_ST = "Unable to complete the operation because the entered target device(s) '{0}' are invalid."
JOB_DESC = "The {0} task initiated from OpenManage Ansible Modules for devices with the ids '{1}'."
APPLY_TRIGGERED = "Successfully initiated the device action job."
JOB_SCHEDULED = "The job is scheduled successfully."
SUCCESS_MSG = "The device operation is performed successfully."

# Device 'Type' ids accepted by each action; 1000 is a server (iDRAC-managed),
# so the iDRAC-specific actions are restricted to that type only.
all_device_types = [1000, 2000, 4000, 5000, 7000, 8000, 9001]
device_type_map = {"refresh_inventory": all_device_types, "reset_idrac": [1000], "clear_idrac_job_queue": [1000]}
# JobService JobType ids per action (see jtype_map for their names).
job_type_map = {"refresh_inventory": 8, "reset_idrac": 3, "clear_idrac_job_queue": 3}
jtype_map = {3: "DeviceAction_Task", 8: "Inventory_Task"}
# Job 'Params' payload fragments keyed by action.
job_params_map = {"refresh_inventory": {"action": "CONFIG_INVENTORY",
                                        "isCollectDriverInventory": "true"},
                  "reset_idrac": {"operationName": "RESET_IDRAC"},
                  "clear_idrac_job_queue": {"operationName": "REMOTE_RACADM_EXEC",
                                            "Command": "jobqueue delete -i JID_CLEARALL_FORCE",
                                            "CommandTimeout": "60", "deviceTypes": "1000"}}
# Human-readable job names used for defaults in the job payload.
jobname_map = {"refresh_inventory": "Refresh inventory", "reset_idrac": "Reset iDRAC",
               "clear_idrac_job_queue": "Clear iDRAC job queue"}
+
+
def get_dev_ids(module, rest_obj, types):
    """Resolve the requested devices to OME device ids.

    Looks up each entry of ``device_ids`` (or, when that option is not
    given, ``device_service_tags``) through the DeviceService and keeps
    only devices whose ``Type`` is one of *types*.

    :param module: AnsibleModule instance supplying the task parameters.
    :param rest_obj: RestOME session used to query the appliance.
    :param types: list of acceptable device ``Type`` ids for the action.
    :return: tuple ``(valids, invalids)`` -- list of matching device ids
        and the set of identifiers that were unknown or of a wrong type.
    """
    invalids = set()
    sts = module.params.get('device_ids')
    # Numeric ids filter on 'Id'; service tags filter on 'Identifier'
    # (string values must be quoted in the OData $filter expression).
    param = "{0} eq {1}"
    srch = 'Id'
    if not sts:
        sts = module.params.get('device_service_tags')
        param = "{0} eq '{1}'"
        srch = 'Identifier'
    devs = []
    for st in sts:
        resp = rest_obj.invoke_request("GET", DEVICE_URI, query_param={"$filter": param.format(srch, st)})
        val = resp.json_data.get('value')
        if not val:
            invalids.add(st)
        for v in val:
            if v[srch] == st:
                if v["Type"] in types:
                    # NOTE(review): extends with the whole filter result rather
                    # than appending the single matched record; presumably the
                    # exact-match filter returns one device -- confirm.
                    devs.extend(val)
                else:
                    invalids.add(st)
                break
        else:
            # Loop finished without an exact match for this identifier.
            invalids.add(st)
    valids = [(dv.get('Id')) for dv in devs]
    return valids, invalids
+
+
def delete_devices(module, rest_obj, valid_ids):
    """Remove the given devices from the appliance and exit the module.

    In check mode no request is issued; the module exits reporting that
    changes would be applied.
    """
    if not module.check_mode:
        rest_obj.invoke_request('POST', DELETE_DEVICES_URI, data={"DeviceIds": list(valid_ids)})
        module.exit_json(msg=DELETE_SUCCESS, changed=True)
    module.exit_json(msg=CHANGES_MSG, changed=True)
+
+
def update_common_job(module, payload, task, valid_ids):
    """Fill in the schedule, name, and description common to all job payloads.

    When ``job_name`` or ``job_description`` is not supplied by the user,
    per-task defaults derived from *task* and *valid_ids* are used instead.
    Mutates *payload* in place.
    """
    mparams = module.params
    payload["Schedule"] = mparams.get('job_schedule')
    payload["JobName"] = mparams.get('job_name') or jobname_map.get(task)
    payload["JobDescription"] = mparams.get('job_description') or JOB_DESC.format(
        jobname_map.get(task), ",".join(map(str, valid_ids)))
+
+
def check_similar_job(rest_obj, payload):
    """Return an existing job equivalent to *payload*, or an empty dict.

    A job is considered equivalent when its name, description, schedule,
    Params (compared as a Key->Value dict) and Targets (compared as an
    Id->TargetType-name dict) all match the prospective payload.  Used for
    idempotency so a duplicate job is not created.  Note: *payload* is
    still in its internal dict form here (before formalize_job_payload).
    """
    query_param = {"$filter": "JobType/Id eq {0}".format(payload['JobType'])}
    job_resp = rest_obj.invoke_request("GET", JOBS_URI, query_param=query_param)
    job_list = job_resp.json_data.get('value', [])
    for jb in job_list:
        if jb['JobName'] == payload['JobName'] and jb['JobDescription'] == payload['JobDescription'] and \
                jb['Schedule'] == payload['Schedule']:
            # Compare job parameters irrespective of their list ordering.
            jb_prm = dict((k.get('Key'), k.get('Value')) for k in jb.get('Params'))
            if not jb_prm == payload.get('Params'):
                continue
            # Compare targets by device id and target-type name.
            trgts = dict((t.get('Id'), t.get('TargetType').get('Name')) for t in jb.get('Targets'))
            if not trgts == payload.get('Targets'):
                continue
            return jb
    return {}
+
+
def job_wait(module, rest_obj, job):
    """Exit the module reporting the outcome of the submitted job.

    - For a scheduled (non-'startnow') job, exits immediately as changed.
    - When ``job_wait`` is disabled, exits right after triggering the job.
    - Otherwise polls the job via job_tracking until it completes and, on
      failure, tries to fetch the last execution detail for a more
      descriptive message.  Always terminates via ``exit_json``.
    """
    mparams = module.params
    if mparams.get('job_schedule') != 'startnow':
        module.exit_json(changed=True, msg=JOB_SCHEDULED, job=strip_substr_dict(job))
    if not module.params.get("job_wait"):
        module.exit_json(changed=True, msg=APPLY_TRIGGERED, job=strip_substr_dict(job))
    else:
        job_msg = SUCCESS_MSG
        job_failed, msg, job_dict, wait_time = job_tracking(
            rest_obj, JOB_URI.format(job_id=job['Id']), max_job_wait_sec=module.params.get('job_wait_timeout'),
            initial_wait=3)
        if job_failed:
            try:
                # Prefer the job's last execution output as the failure
                # message; flatten newlines for a single-line msg.
                job_resp = rest_obj.invoke_request('GET', LAST_EXEC.format(job_id=job['Id']))
                msg = job_resp.json_data.get("Value")
                job_msg = msg.replace('\n', ' ')
            except Exception:
                # Fall back to the tracking message if the detail fetch fails.
                job_msg = msg
        module.exit_json(failed=job_failed, msg=job_msg, job=strip_substr_dict(job), changed=True)
+
+
def get_task_payload(task):
    """Return the base job payload (JobType id and Params) for *task*.

    Unknown tasks default to JobType 8 with empty Params.
    """
    return {
        "JobType": job_type_map.get(task, 8),
        "Params": job_params_map.get(task, {}),
    }
+
+
def get_payload_method(task, valid_ids):
    """Build the job payload with DEVICE targets for the given device ids.

    :return: tuple of (payload, HTTP method, URI) for job submission.
    """
    payload = get_task_payload(task)
    payload["Targets"] = {device_id: "DEVICE" for device_id in valid_ids}
    return payload, "POST", JOBS_URI
+
+
def formalize_job_payload(payload):
    """Convert the internal payload dict into the JobService wire format.

    Params and Targets are expanded from plain dicts into the list-of-dict
    structures the API expects, and the JobType id is replaced by its full
    object.  Mutates *payload* in place.
    """
    payload["Id"] = 0
    payload["State"] = "Enabled"
    payload['Params'] = [{"Key": key, "Value": value} for key, value in payload['Params'].items()]
    payload['Targets'] = [{"Id": dev_id, "Data": "", "TargetType": {"Id": 1000, "Name": type_name}}
                          for dev_id, type_name in payload['Targets'].items()]
    job_type_id = payload["JobType"]
    payload["JobType"] = {"Id": job_type_id, "Name": jtype_map.get(job_type_id)}
+
+
def perform_device_tasks(module, rest_obj, valid_ids):
    """Create or re-run the device action job and exit with its status.

    If an equivalent job already exists it is re-run (for 'startnow'
    schedules) instead of creating a duplicate; otherwise a new job is
    submitted.  Check mode exits before any POST is made.
    """
    task = module.params.get("device_action")
    payload, method, uri = get_payload_method(task, valid_ids)
    update_common_job(module, payload, task, valid_ids)
    job = check_similar_job(rest_obj, payload)
    if not job:
        # No equivalent job found: convert to wire format and create it.
        formalize_job_payload(payload)
        if module.check_mode:
            module.exit_json(msg=CHANGES_MSG, changed=True)
        resp = rest_obj.invoke_request("POST", JOBS_URI, data=payload, api_timeout=60)
        job_wait(module, rest_obj, resp.json_data)
    else:
        # 2050 is presumably the 'Running' LastRunStatus id, so an
        # already-running job is not re-triggered -- TODO confirm.
        if module.params.get('job_schedule') == 'startnow' and job["LastRunStatus"]['Id'] != 2050:
            if module.check_mode:
                module.exit_json(msg=CHANGES_MSG, changed=True)
            resp = rest_obj.invoke_request("POST", RUN_JOB_URI, data={"JobIds": [job['Id']]})
            job_wait(module, rest_obj, job)
        module.exit_json(msg=NO_CHANGES_MSG, job=strip_substr_dict(job))
+
+
def main():
    """Module entry point: parse arguments and dispatch the device action.

    ``state=present`` runs the selected device_action job against the
    requested devices; ``state=absent`` removes the devices from the
    appliance.  HTTP errors fail the module with the parsed error body;
    URL errors report the host as unreachable.
    """
    specs = {
        "device_service_tags": {"type": "list", "elements": 'str'},
        "device_ids": {"type": "list", "elements": 'int'},
        "state": {"type": "str", "choices": ["present", "absent"], "default": "present"},
        "device_action": {"type": "str", "choices": ["refresh_inventory", "reset_idrac", "clear_idrac_job_queue"],
                          "default": 'refresh_inventory'},
        "job_wait": {"type": "bool", "default": True},
        "job_wait_timeout": {"type": "int", "default": 1200},
        "job_schedule": {"type": "str", "default": 'startnow'},
        "job_name": {"type": "str"},
        "job_description": {"type": "str"},
        # "job_params": {"type": "dict"}
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[],
        mutually_exclusive=[
            ("device_service_tags", "device_ids"),
        ],
        required_one_of=[("device_service_tags", "device_ids")],
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            if module.params.get("state") == 'present':
                # Only device types valid for the chosen action are accepted.
                valids, invalids = get_dev_ids(module, rest_obj,
                                               device_type_map.get(module.params.get("device_action")))
                if invalids:
                    module.exit_json(failed=True, msg=INVALID_DEV_ST.format(",".join(map(str, invalids))))
                perform_device_tasks(module, rest_obj, valids)
            else:
                # Deletion accepts any device type; unknown ids are ignored.
                valids, invalids = get_dev_ids(module, rest_obj, all_device_types)
                if not valids:
                    module.exit_json(msg=NO_CHANGES_MSG)
                delete_devices(module, rest_obj, valids)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError,
            OSError) as err:
        module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_diagnostics.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_diagnostics.py
new file mode 100644
index 000000000..71b0e0960
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_diagnostics.py
@@ -0,0 +1,518 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.3.0
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: ome_diagnostics
+short_description: Export technical support logs(TSR) to network share location
+version_added: "3.6.0"
+description: This module allows to export SupportAssist collection logs from OpenManage Enterprise and
+ OpenManage Enterprise Modular and application logs from OpenManage Enterprise Modular to a CIFS or NFS share.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ device_ids:
+ type: list
+ description:
+ - List of target device IDs.
+ - This is applicable for C(support_assist_collection) and C(supportassist_collection) logs.
+ - This option is mutually exclusive with I(device_service_tags) and I(device_group_name).
+ elements: int
+ device_service_tags:
+ type: list
+ description:
+ - List of target identifier.
+ - This is applicable for C(support_assist_collection) and C(supportassist_collection) logs.
+ - This option is mutually exclusive with I(device_ids) and I(device_group_name).
+ elements: str
+ device_group_name:
+ type: str
+ description:
+ - Name of the device group to export C(support_assist_collection) or C(supportassist_collection) logs of all devices within the group.
+ - This is applicable for C(support_assist_collection) and C(supportassist_collection) logs.
+ - This option is not applicable for OpenManage Enterprise Modular.
+ - This option is mutually exclusive with I(device_ids) and I(device_service_tags).
+ log_type:
+ type: str
+ description:
+ - C(application) is applicable for OpenManage Enterprise Modular to export the application log bundle.
+ - C(support_assist_collection) and C(supportassist_collection) is applicable for one or more devices to export SupportAssist logs.
+ - C(support_assist_collection) and C(supportassist_collection) supports both OpenManage Enterprise and OpenManage Enterprise Modular.
+ - C(support_assist_collection) and C(supportassist_collection) does not support export of C(OS_LOGS) from OpenManage Enterprise.
+ If tried to export, the tasks will complete with errors, and the module fails.
+ choices: [application, support_assist_collection, supportassist_collection]
+ default: support_assist_collection
+ mask_sensitive_info:
+ type: bool
+ description:
+ - Select this option to mask the personal identification information such as IPAddress,
+ DNS, alert destination, email, gateway, inet6, MacAddress, netmask etc.
+ - This option is applicable for C(application) of I(log_type).
+ default: False
+ log_selectors:
+ type: list
+ description:
+ - By default, the SupportAssist logs contain only hardware logs. To collect additional logs
+ such as OS logs, RAID logs or Debug logs, specify the log types to be collected in the choices list.
+ - If the log types are not specified, only the hardware logs are exported.
+ - C(OS_LOGS) to collect OS Logs.
+ - C(RAID_LOGS) to collect RAID controller logs.
+ - C(DEBUG_LOGS) to collect Debug logs.
+ - This option is applicable only for C(support_assist_collection) and C(supportassist_collection) of I(log_type).
+ choices: [OS_LOGS, RAID_LOGS, DEBUG_LOGS]
+ elements: str
+ share_address:
+ type: str
+ required: True
+ description: Network share IP address.
+ share_name:
+ type: str
+ required: True
+ description:
+ - Network share path.
+ - Filename is auto generated and should not be provided as part of I(share_name).
+ share_type:
+ type: str
+ required: True
+ description: Network share type
+ choices: [NFS, CIFS]
+ share_user:
+ type: str
+ description:
+ - Network share username.
+ - This option is applicable for C(CIFS) of I(share_type).
+ share_password:
+ type: str
+ description:
+ - Network share password
+ - This option is applicable for C(CIFS) of I(share_type).
+ share_domain:
+ type: str
+ description:
+ - Network share domain name.
      - This option is applicable for C(CIFS) of I(share_type).
+ job_wait:
+ type: bool
+ description:
+ - Whether to wait for the Job completion or not.
+ - The maximum wait time is I(job_wait_timeout).
+ default: True
+ job_wait_timeout:
+ type: int
+ description:
+ - The maximum wait time of I(job_wait) in minutes.
      - This option is applicable when I(job_wait) is true.
+ default: 60
+ test_connection:
+ type: bool
+ description:
+ - Test the availability of the network share location.
+ - I(job_wait) and I(job_wait_timeout) options are not applicable for I(test_connection).
+ default: False
+ lead_chassis_only:
+ type: bool
+ description:
+ - Extract the logs from Lead chassis only.
+ - I(lead_chassis_only) is only applicable when I(log_type) is C(application) on OpenManage Enterprise Modular.
+ default: False
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+ - "Sachin Apagundi(@sachin-apa)"
+notes:
+ - Run this module from a system that has direct access to OpenManage Enterprise.
+ - This module performs the test connection and device validations. It does not create a job for copying the
+ logs in check mode and always reports as changes found.
+ - This module supports C(check_mode).
+"""
+
+
+EXAMPLES = r"""
+---
+- name: Export application log using CIFS share location
+ dellemc.openmanage.ome_diagnostics:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_type: CIFS
+ share_address: "192.168.0.2"
+ share_user: share_username
+ share_password: share_password
+ share_name: cifs_share
+ log_type: application
+ mask_sensitive_info: false
+ test_connection: true
+
+- name: Export application log using NFS share location
+ dellemc.openmanage.ome_diagnostics:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_address: "192.168.0.3"
+ share_type: NFS
+ share_name: nfs_share
+ log_type: application
+ mask_sensitive_info: true
+ test_connection: true
+
+- name: Export SupportAssist log using CIFS share location
+ dellemc.openmanage.ome_diagnostics:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_address: "192.168.0.3"
+ share_user: share_username
+ share_password: share_password
+ share_name: cifs_share
+ share_type: CIFS
+ log_type: support_assist_collection
+ device_ids: [10011, 10022]
+ log_selectors: [OS_LOGS]
+ test_connection: true
+
+- name: Export SupportAssist log using NFS share location
+ dellemc.openmanage.ome_diagnostics:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_address: "192.168.0.3"
+ share_type: NFS
+ share_name: nfs_share
+ log_type: support_assist_collection
+ device_group_name: group_name
+ test_connection: true
+"""
+
+RETURN = r"""
+---
+msg:
+ type: str
+ description: "Overall status of the export log."
+ returned: always
+ sample: "Export log job completed successfully."
job_status:
+ type: dict
+ description: Details of the export log operation status.
+ returned: success
+ sample: {
+ "Builtin": false,
+ "CreatedBy": "root",
+ "Editable": true,
+ "EndTime": None,
+ "Id": 12778,
+ "JobDescription": "Export device log",
+ "JobName": "Export Log",
+ "JobStatus": {"Id": 2080, "Name": "New"},
+ "JobType": {"Id": 18, "Internal": false, "Name": "DebugLogs_Task"},
+ "LastRun": "2021-07-06 10:52:50.519",
+ "LastRunStatus": {"Id": 2060, "Name": "Completed"},
+ "NextRun": None,
+ "Schedule": "startnow",
+ "StartTime": None,
+ "State": "Enabled",
+ "UpdatedBy": None,
+ "UserGenerated": true,
+ "Visible": true,
+ "Params": [
+ {"JobId": 12778, "Key": "maskSensitiveInfo", "Value": "FALSE"},
+ {"JobId": 12778, "Key": "password", "Value": "tY86w7q92u0QzvykuF0gQQ"},
+ {"JobId": 12778, "Key": "userName", "Value": "administrator"},
+ {"JobId": 12778, "Key": "shareName", "Value": "iso"},
+ {"JobId": 12778, "Key": "OPERATION_NAME", "Value": "EXTRACT_LOGS"},
+ {"JobId": 12778, "Key": "shareType", "Value": "CIFS"},
+ {"JobId": 12778, "Key": "shareAddress", "Value": "100.96.32.142"}
+ ],
+ "Targets": [{"Data": "", "Id": 10053, "JobId": 12778, "TargetType": {"Id": 1000, "Name": "DEVICE"}}]
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+"""
+
+
+import json
+import re
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
# Numeric codes for the optional SupportAssist log selectors.
LOG_SELECTOR = {"OS_LOGS": 1, "RAID_LOGS": 2, "DEBUG_LOGS": 3}
# REST endpoints used by this module (relative to the OME API base URI).
JOB_URI = "JobService/Jobs"
GROUP_URI = "GroupService/Groups"
GROUP_DEVICE_URI = "GroupService/Groups({0})/Devices"
DEVICE_URI = "DeviceService/Devices"
DOMAIN_URI = "ManagementDomainService/Domains"
EXE_HISTORY_URI = "JobService/Jobs({0})/ExecutionHistories"
# Message reported in check mode.
CHANGES_FOUND = "Changes found to be applied."
+
+
def group_validation(module, rest_obj):
    """Resolve a device group name to the ids of its server devices.

    Fails the module when the group does not exist, contains no devices,
    or contains no devices of Type 1000 (the only type that supports
    export log).
    """
    group_name = module.params.get('device_group_name')
    group_resp = rest_obj.invoke_request(
        "GET", GROUP_URI, query_param={"$filter": "Name eq '{0}'".format(group_name)})
    group = group_resp.json_data["value"]
    if not group:
        module.fail_json(msg="Unable to complete the operation because the entered target "
                             "device group name '{0}' is invalid.".format(group_name))
    member_resp = rest_obj.invoke_request("GET", GROUP_DEVICE_URI.format(group[0]["Id"]))
    member_devices = member_resp.json_data["value"]
    if not member_devices:
        module.fail_json(msg="There are no device(s) present in this group.")
    group_device = [member["Id"] for member in member_devices if member["Type"] == 1000]
    if not group_device:
        module.fail_json(msg="The requested group '{0}' does not contain devices that "
                             "support export log.".format(group_name))
    return group_device
+
+
def device_validation(module, rest_obj):
    """Validate the requested device ids or service tags for export log.

    Returns the list of valid server (Type 1000) device ids.  Fails the
    module when any identifier is unknown, or when none of the known
    devices is of a type that supports export log.
    """
    device_lst, invalid_lst, other_types = [], [], []
    devices = module.params.get("device_ids")
    tags = module.params.get("device_service_tags")
    all_device = rest_obj.get_all_report_details(DEVICE_URI)
    # Match on 'Id' when ids were given; otherwise match on service tag.
    key = "Id" if devices is not None else "DeviceServiceTag"
    value = "id" if key == "Id" else "service tag"
    req_device = devices if devices is not None else tags
    for each in req_device:
        matches = [dev for dev in all_device["report_list"] if dev[key] == each]
        if not matches:
            invalid_lst.append(str(each))
        elif matches[0]["Type"] == 1000:
            device_lst.append(matches[0]["Id"])
        else:
            other_types.append(str(each))
    if invalid_lst:
        module.fail_json(msg="Unable to complete the operation because the entered "
                             "target device {0}(s) '{1}' are invalid.".format(value, ",".join(set(invalid_lst))))
    if not device_lst and other_types:
        module.fail_json(msg="The requested device {0}(s) '{1}' are "
                             "not applicable for export log.".format(value, ",".join(set(other_types))))
    return device_lst
+
+
def extract_log_operation(module, rest_obj, device_lst=None):
    """Build the EXTRACT_LOGS job payload and submit it via job_submission.

    For ``application`` logs the targets are chassis devices (optionally
    only the lead/standalone chassis); for SupportAssist logs the targets
    are the already-validated server device ids in *device_lst*.

    :param device_lst: list of device ids for SupportAssist collection;
        ignored for application logs.
    :return: response object returned by ``rest_obj.job_submission``.
    """
    payload_params, target_params = [], []
    log_type = module.params["log_type"]
    if log_type == "application":
        lead_only = module.params["lead_chassis_only"]
        resp_data = None
        if lead_only:
            # Find the LEAD (or STANDALONE) chassis in the domain list and
            # target only that device.
            domain_details = rest_obj.get_all_items_with_pagination(DOMAIN_URI)
            key = "Id"
            ch_device_id = None
            for each_domain in domain_details["value"]:
                if each_domain["DomainRoleTypeValue"] in ["LEAD", "STANDALONE"]:
                    ch_device_id = each_domain["DeviceId"]
            if ch_device_id:
                resp = rest_obj.invoke_request("GET", DEVICE_URI,
                                               query_param={"$filter": "{0} eq {1}".format(key, ch_device_id)})
                resp_data = resp.json_data["value"]
        else:
            # All chassis devices (Type 2000) are targeted.
            resp = rest_obj.invoke_request("GET", DEVICE_URI, query_param={"$filter": "Type eq 2000"})
            resp_data = resp.json_data["value"]
        if resp_data:
            for dev in resp_data:
                target_params.append({"Id": dev["Id"], "Data": "",
                                      "TargetType": {"Id": dev["Type"], "Name": "CHASSIS"}})
        else:
            module.fail_json(msg="There is no device(s) available to export application log.")
    else:
        # SupportAssist collection: one DEVICE target per validated id.
        for device in device_lst:
            target_params.append({"Id": device, "Data": "",
                                  "TargetType": {"Id": 1000, "Name": "DEVICE"}})
    # Mandatory share parameters.
    payload_params.append({"Key": "shareAddress", "Value": module.params["share_address"]})
    payload_params.append({"Key": "shareType", "Value": module.params["share_type"]})
    payload_params.append({"Key": "OPERATION_NAME", "Value": "EXTRACT_LOGS"})
    # Optional share parameters are included only when supplied.
    if module.params.get("share_name") is not None:
        payload_params.append({"Key": "shareName", "Value": module.params["share_name"]})
    if module.params.get("share_user") is not None:
        payload_params.append({"Key": "userName", "Value": module.params["share_user"]})
    if module.params.get("share_password") is not None:
        payload_params.append({"Key": "password", "Value": module.params["share_password"]})
    if module.params.get("share_domain") is not None:
        payload_params.append({"Key": "domainName", "Value": module.params["share_domain"]})
    if module.params.get("mask_sensitive_info") is not None and log_type == "application":
        # The API expects the boolean as an upper-case string ("TRUE"/"FALSE").
        payload_params.append({"Key": "maskSensitiveInfo", "Value": str(module.params["mask_sensitive_info"]).upper()})
    if module.params.get("log_selectors") is not None and (log_type == "support_assist_collection" or log_type == "supportassist_collection"):
        # Selector codes are sorted and prefixed with 0 (hardware logs).
        log_lst = [LOG_SELECTOR[i] for i in module.params["log_selectors"]]
        log_lst.sort()
        log_selector = ",".join(map(str, log_lst))
        payload_params.append({"Key": "logSelector", "Value": "0,{0}".format(log_selector)})
    response = rest_obj.job_submission("Export Log", "Export device log", target_params,
                                       payload_params, {"Id": 18, "Name": "DebugLogs_Task"})
    return response
+
+
def check_domain_service(module, rest_obj):
    """Verify the appliance exposes the ManagementDomainService endpoint.

    A CGEN1006 error from the domains endpoint means the target system
    does not support application-log export, in which case the module
    fails; any other HTTP error is ignored here.
    """
    try:
        rest_obj.invoke_request("GET", DOMAIN_URI, api_timeout=5)
    except HTTPError as err:
        message_id = json.load(err)["error"]["@Message.ExtendedInfo"][0]["MessageId"]
        if message_id == "CGEN1006":
            module.fail_json(msg="Export log operation is not supported on the specified system.")
+
+
def find_failed_jobs(resp, rest_obj):
    """Inspect a failed export job's execution history.

    Walks the detail records of the most recent execution; the job is
    reported as a genuine failure when a detail message does NOT match
    "Job status for JID_<n> is Completed with Errors." -- i.e. a pure
    completed-with-errors run is tolerated.

    :param resp: job dict (must contain 'Id').
    :return: tuple ``(msg, fail)`` where *fail* is True when the module
        should fail rather than report completed-with-errors.
    """
    msg, fail = "Export log job completed with errors.", False
    history = rest_obj.invoke_request("GET", EXE_HISTORY_URI.format(resp["Id"]))
    if history.json_data["value"]:
        # Only the first (most recent) execution history entry is examined
        # -- presumably the API returns newest first; confirm.
        hist = history.json_data["value"][0]
        history_details = rest_obj.invoke_request(
            "GET",
            "{0}({1})/ExecutionHistoryDetails".format(EXE_HISTORY_URI.format(resp["Id"]), hist["Id"])
        )
        for hd in history_details.json_data["value"]:
            if not re.findall(r"Job status for JID_\d+ is Completed with Errors.", hd["Value"]):
                fail = True
                break
            else:
                fail = False
    return msg, fail
+
+
def main():
    """Module entry point for the export-log (diagnostics) operation.

    Flow: validate arguments, optionally verify the domain service (for
    application logs), refuse to run while another DebugLogs_Task job is
    active, optionally test the share connection, validate the target
    devices/group, then submit the extract-log job and (optionally) track
    it to completion.
    """
    specs = {
        "device_ids": {"required": False, "type": "list", "elements": "int"},
        "device_service_tags": {"required": False, "type": "list", "elements": "str"},
        "device_group_name": {"required": False, "type": "str"},
        "log_type": {"required": False, "type": "str", "default": "support_assist_collection",
                     "choices": ["support_assist_collection", "application", "supportassist_collection"]},
        "mask_sensitive_info": {"required": False, "type": "bool", "default": False},
        "log_selectors": {"required": False, "type": "list",
                          "choices": ["RAID_LOGS", "OS_LOGS", "DEBUG_LOGS"], "elements": "str"},
        "share_address": {"required": True, "type": "str"},
        "share_name": {"required": True, "type": "str"},
        "share_type": {"required": True, "type": "str", "choices": ["NFS", "CIFS"]},
        "share_user": {"required": False, "type": "str"},
        "share_password": {"required": False, "type": "str", "no_log": True},
        "share_domain": {"required": False, "type": "str"},
        "job_wait": {"required": False, "type": "bool", "default": True},
        "job_wait_timeout": {"required": False, "type": "int", "default": 60},
        "test_connection": {"required": False, "type": "bool", "default": False},
        "lead_chassis_only": {"required": False, "type": "bool", "default": False},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[
            ['log_type', 'application', ['mask_sensitive_info']],
            ['log_type', 'support_assist_collection',
             ['device_ids', 'device_service_tags', 'device_group_name'], True],
            ['log_type', 'supportassist_collection',
             ['device_ids', 'device_service_tags', 'device_group_name'], True],
            ['share_type', 'CIFS', ['share_user', 'share_password']]
        ],
        mutually_exclusive=[('device_ids', 'device_service_tags', 'device_group_name')],
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            # checking the domain service
            if module.params["log_type"] == "application":
                check_domain_service(module, rest_obj)

            # checking any existing running job
            job_allowed, job_lst = rest_obj.check_existing_job_state("DebugLogs_Task")
            if not job_allowed:
                module.fail_json(msg="An export log job is already running. Wait for the job to finish.")

            # test network connection
            if module.params["test_connection"]:
                conn_resp = rest_obj.test_network_connection(module.params["share_address"],
                                                             module.params["share_name"],
                                                             module.params["share_type"],
                                                             module.params["share_user"],
                                                             module.params["share_password"],
                                                             module.params["share_domain"])
                job_failed, job_message = rest_obj.job_tracking(conn_resp.json_data["Id"], job_wait_sec=5,
                                                                sleep_time=5)
                if job_failed:
                    module.fail_json(msg="Unable to access the share. Ensure that the share address, share name, "
                                         "share domain, and share credentials provided are correct.")

            # validation for device id/tag/group
            valid_device = []
            if (module.params["log_type"] == "support_assist_collection" or module.params["log_type"] == "supportassist_collection") and \
                    module.params.get("device_group_name") is not None:
                valid_device = group_validation(module, rest_obj)
            elif (module.params["log_type"] == "support_assist_collection" or module.params["log_type"] == "supportassist_collection") and \
                    module.params.get("device_group_name") is None:
                valid_device = device_validation(module, rest_obj)

            # exit if running in check mode
            if module.check_mode:
                module.exit_json(msg=CHANGES_FOUND, changed=True)

            # extract log job operation
            response = extract_log_operation(module, rest_obj, device_lst=valid_device)
            message = "Export log job submitted successfully."
            if module.params["job_wait"]:
                # job_wait_timeout is in minutes; job_tracking expects seconds.
                seconds = module.params["job_wait_timeout"] * 60
                job_failed, job_message = rest_obj.job_tracking(response.json_data["Id"],
                                                                job_wait_sec=seconds,
                                                                sleep_time=5)
                message = "Export log job completed successfully."
                if job_message == "The job is not complete after {0} seconds.".format(seconds):
                    module.fail_json(
                        msg="The export job is not complete because it has exceeded the configured timeout period.",
                        job_status=response.json_data
                    )
                if job_failed:
                    # Distinguish completed-with-errors from a real failure.
                    message, failed_job = find_failed_jobs(response.json_data, rest_obj)
                    if failed_job:
                        module.fail_json(msg=message, job_status=response.json_data)
            # Re-fetch the job for an up-to-date status in the result.
            response = rest_obj.invoke_request("GET", "{0}({1})".format(JOB_URI, response.json_data["Id"]))
            resp = response.json_data
            if resp:
                resp = rest_obj.strip_substr_dict(resp)
            module.exit_json(msg=message, job_status=resp)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_discovery.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_discovery.py
new file mode 100644
index 000000000..a4fde99f9
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_discovery.py
@@ -0,0 +1,1067 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_discovery
+short_description: Create, modify, or delete a discovery job on OpenManage Enterprise
+version_added: "3.3.0"
+description: This module allows to create, modify, or delete a discovery job.
+extends_documentation_fragment:
+ - dellemc.openmanage.oment_auth_options
+options:
+ state:
+ description:
+ - C(present) creates a discovery job or modifies an existing discovery job.
+ - I(discovery_job_name) is mandatory for the creation of a new discovery job.
+ - If multiple discoveries of the same I(discovery_job_name) exist, then the new discovery job will not be created.
+ - C(absent) deletes an existing discovery job(s) with the specified I(discovery_job_name).
+ choices: [present, absent]
+ default: present
+ type: str
+ discovery_job_name:
+ description:
+ - Name of the discovery configuration job.
+ - It is mutually exclusive with I(discovery_id).
+ type: str
+ discovery_id:
+ description:
+ - ID of the discovery configuration group.
+ - This value is DiscoveryConfigGroupId in the return values under discovery_status.
+ - It is mutually exclusive with I(discovery_job_name).
+ type: int
+ new_name:
+ description: New name of the discovery configuration job.
+ type: str
+ schedule:
+ description:
+ - Provides the option to schedule the discovery job.
+ - If C(RunLater) is selected, then I(cron) must be specified.
+ choices: [RunNow, RunLater]
+ default: RunNow
+ type: str
+ cron:
+ description:
+ - Provide a cron expression based on Quartz cron format.
+ type: str
+ trap_destination:
+ description:
+ - Enable OpenManage Enterprise to receive the incoming SNMP traps from the discovered devices.
+ - This is effective only for servers discovered by using their iDRAC interface.
+ type: bool
+ default: false
+ community_string:
+ description: "Enable the use of SNMP community strings to receive SNMP traps using Application Settings in
+ OpenManage Enterprise. This option is available only for the discovered iDRAC servers and MX7000 chassis."
+ type: bool
+ default: false
+ email_recipient:
+ description: "Enter the email address to which notifications are to be sent about the discovery job status.
+ Configure the SMTP settings to allow sending notifications to an email address."
+ type: str
+ job_wait:
+ description:
+ - Provides the option to wait for job completion.
+ - This option is applicable when I(state) is C(present).
+ type: bool
+ default: true
+ job_wait_timeout:
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(True).
+ type: int
+ default: 10800
+ ignore_partial_failure:
+ description:
+ - "Provides the option to ignore partial failures. Partial failures occur when there is a combination of both
+ discovered and undiscovered IPs."
+ - If C(False), then the partial failure is not ignored, and the module will error out.
+ - If C(True), then the partial failure is ignored.
+ - This option is only applicable if I(job_wait) is C(True).
+ type: bool
+ default: false
+ discovery_config_targets:
+ description:
+ - Provide the list of discovery targets.
+ - "Each discovery target is a set of I(network_address_detail), I(device_types), and one or more protocol
+ credentials."
+ - This is mandatory when I(state) is C(present).
+ - "C(WARNING) Modification of this field is not supported, this field is overwritten every time. Ensure to provide
+ all the required details for this field."
+ type: list
+ elements: dict
+ suboptions:
+ network_address_detail:
+ description:
+ - "Provide the list of IP addresses, host names, or the range of IP addresses of the devices to be discovered
+ or included."
+ - "Sample Valid IP Range Formats"
+ - " 192.35.0.0"
+        - "   192.36.0.0-192.36.0.255"
+ - " 192.37.0.0/24"
+ - " 2345:f2b1:f083:135::5500/118"
+ - " 2345:f2b1:f083:135::a500-2607:f2b1:f083:135::a600"
+ - " hostname.domain.tld"
+ - " hostname"
+ - " 2345:f2b1:f083:139::22a"
+ - "Sample Invalid IP Range Formats"
+ - " 192.35.0.*"
+ - " 192.36.0.0-255"
+ - " 192.35.0.0/255.255.255.0"
+ - C(NOTE) The range size for the number of IP addresses is limited to 16,385 (0x4001).
+ - C(NOTE) Both IPv6 and IPv6 CIDR formats are supported.
+ type: list
+ elements: str
+ required: true
+ device_types:
+ description:
+ - Provide the type of devices to be discovered.
+ - The accepted types are SERVER, CHASSIS, NETWORK SWITCH, and STORAGE.
+ - A combination or all of the above can be provided.
+ - "Supported protocols for each device type are:"
+ - SERVER - I(wsman), I(redfish), I(snmp), I(ipmi), I(ssh), and I(vmware).
+ - CHASSIS - I(wsman), and I(redfish).
+ - NETWORK SWITCH - I(snmp).
+ - STORAGE - I(storage), and I(snmp).
+ type: list
+ elements: str
+ required: true
+ wsman:
+ description: Web Services-Management (WS-Man).
+ type: dict
+ suboptions:
+ username:
+ description: Provide a username for the protocol.
+ type: str
+ required: true
+ password:
+ description: Provide a password for the protocol.
+ type: str
+ required: true
+ domain:
+ description: Provide a domain for the protocol.
+ type: str
+ port:
+ description: Enter the port number that the job must use to discover the devices.
+ type: int
+ default: 443
+ retries:
+ description: Enter the number of repeated attempts required to discover a device.
+ type: int
+ default: 3
+ timeout:
+ description: Enter the time in seconds after which a job must stop running.
+ type: int
+ default: 60
+ cn_check:
+ description: Enable the Common Name (CN) check.
+ type: bool
+ default: false
+ ca_check:
+ description: Enable the Certificate Authority (CA) check.
+ type: bool
+ default: false
+ certificate_data:
+ description: Provide certificate data for the CA check.
+ type: str
+ redfish:
+ description: REDFISH protocol.
+ type: dict
+ suboptions:
+ username:
+ description: Provide a username for the protocol.
+ type: str
+ required: true
+ password:
+ description: Provide a password for the protocol.
+ type: str
+ required: true
+ domain:
+ description: Provide a domain for the protocol.
+ type: str
+ port:
+ description: Enter the port number that the job must use to discover the devices.
+ type: int
+ default: 443
+ retries:
+ description: Enter the number of repeated attempts required to discover a device.
+ type: int
+ default: 3
+ timeout:
+ description: Enter the time in seconds after which a job must stop running.
+ type: int
+ default: 60
+ cn_check:
+ description: Enable the Common Name (CN) check.
+ type: bool
+ default: false
+ ca_check:
+ description: Enable the Certificate Authority (CA) check.
+ type: bool
+ default: false
+ certificate_data:
+ description: Provide certificate data for the CA check.
+ type: str
+ snmp:
+ description: Simple Network Management Protocol (SNMP).
+ type: dict
+ suboptions:
+ community:
+ description: Community string for the SNMP protocol.
+ type: str
+ required: true
+ port:
+ description: Enter the port number that the job must use to discover the devices.
+ type: int
+ default: 161
+ retries:
+ description: Enter the number of repeated attempts required to discover a device.
+ type: int
+ default: 3
+ timeout:
+ description: Enter the time in seconds after which a job must stop running.
+ type: int
+ default: 3
+ storage:
+ description: HTTPS Storage protocol.
+ type: dict
+ suboptions:
+ username:
+ description: Provide a username for the protocol.
+ type: str
+ required: true
+ password:
+ description: Provide a password for the protocol.
+ type: str
+ required: true
+ domain:
+ description: Provide a domain for the protocol.
+ type: str
+ port:
+ description: Enter the port number that the job must use to discover the devices.
+ type: int
+ default: 443
+ retries:
+ description: Enter the number of repeated attempts required to discover a device.
+ type: int
+ default: 3
+ timeout:
+ description: Enter the time in seconds after which a job must stop running.
+ type: int
+ default: 60
+ cn_check:
+ description: Enable the Common Name (CN) check.
+ type: bool
+ default: false
+ ca_check:
+ description: Enable the Certificate Authority (CA) check.
+ type: bool
+ default: false
+ certificate_data:
+ description: Provide certificate data for the CA check.
+ type: str
+ vmware:
+ description: VMWARE protocol.
+ type: dict
+ suboptions:
+ username:
+ description: Provide a username for the protocol.
+ type: str
+ required: true
+ password:
+ description: Provide a password for the protocol.
+ type: str
+ required: true
+ domain:
+ description: Provide a domain for the protocol.
+ type: str
+ port:
+ description: Enter the port number that the job must use to discover the devices.
+ type: int
+ default: 443
+ retries:
+ description: Enter the number of repeated attempts required to discover a device.
+ type: int
+ default: 3
+ timeout:
+ description: Enter the time in seconds after which a job must stop running.
+ type: int
+ default: 60
+ cn_check:
+ description: Enable the Common Name (CN) check.
+ type: bool
+ default: false
+ ca_check:
+ description: Enable the Certificate Authority (CA) check.
+ type: bool
+ default: false
+ certificate_data:
+ description: Provide certificate data for the CA check.
+ type: str
+ ssh:
+ description: Secure Shell (SSH).
+ type: dict
+ suboptions:
+ username:
+ description: Provide a username for the protocol.
+ type: str
+ required: true
+ password:
+ description: Provide a password for the protocol.
+ type: str
+ required: true
+ port:
+ description: Enter the port number that the job must use to discover the devices.
+ type: int
+ default: 22
+ retries:
+ description: Enter the number of repeated attempts required to discover a device.
+ type: int
+ default: 3
+ timeout:
+ description: Enter the time in seconds after which a job must stop running.
+ type: int
+ default: 60
+ check_known_hosts:
+ description: Verify the known host key.
+ type: bool
+ default: false
+ is_sudo_user:
+ description: Use the SUDO option.
+ type: bool
+ default: false
+ ipmi:
+ description: Intelligent Platform Management Interface (IPMI)
+ type: dict
+ suboptions:
+ username:
+ description: Provide a username for the protocol.
+ type: str
+ required: true
+ password:
+ description: Provide a password for the protocol.
+ type: str
+ required: true
+ retries:
+ description: Enter the number of repeated attempts required to discover a device.
+ type: int
+ default: 3
+ timeout:
+ description: Enter the time in seconds after which a job must stop running.
+ type: int
+ default: 60
+ kgkey:
+ description: KgKey for the IPMI protocol.
+ type: str
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Jagadeesh N V (@jagadeeshnv)"
+ - "Sajna Shetty (@Sajna-Shetty)"
+notes:
+ - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise.
+ - This module does not support C(check_mode).
+    - If I(state) is C(present), then idempotency is not supported.
+'''
+
+EXAMPLES = r'''
+---
+- name: Discover servers in a range
+ dellemc.openmanage.ome_discovery:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ discovery_job_name: "Discovery_server_1"
+ discovery_config_targets:
+ - network_address_detail:
+ - 192.96.24.1-192.96.24.255
+ device_types:
+ - SERVER
+ wsman:
+ username: user
+ password: password
+
+- name: Discover chassis in a range
+ dellemc.openmanage.ome_discovery:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ discovery_job_name: "Discovery_chassis_1"
+ discovery_config_targets:
+ - network_address_detail:
+ - 192.96.24.1-192.96.24.255
+ device_types:
+ - CHASSIS
+ wsman:
+ username: user
+ password: password
+
+- name: Discover switches in a range
+ dellemc.openmanage.ome_discovery:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ discovery_job_name: "Discover_switch_1"
+ discovery_config_targets:
+ - network_address_detail:
+ - 192.96.24.1-192.96.24.255
+ device_types:
+ - NETWORK SWITCH
+ snmp:
+ community: snmp_creds
+
+- name: Discover storage in a range
+ dellemc.openmanage.ome_discovery:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ discovery_job_name: "Discover_storage_1"
+ discovery_config_targets:
+ - network_address_detail:
+ - 192.96.24.1-192.96.24.255
+ device_types:
+ - STORAGE
+ storage:
+ username: user
+ password: password
+ snmp:
+ community: snmp_creds
+
+- name: Delete a discovery job
+ dellemc.openmanage.ome_discovery:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "absent"
+ discovery_job_name: "Discovery-123"
+
+- name: Schedule the discovery of multiple devices ignoring partial failure and enable trap to receive alerts
+ dellemc.openmanage.ome_discovery:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "present"
+ discovery_job_name: "Discovery-123"
+ discovery_config_targets:
+ - network_address_detail:
+ - 192.96.24.1-192.96.24.255
+ - 192.96.0.0/24
+ - 192.96.26.108
+ device_types:
+ - SERVER
+ - CHASSIS
+ - STORAGE
+ - NETWORK SWITCH
+ wsman:
+ username: wsman_user
+ password: wsman_pwd
+ redfish:
+ username: redfish_user
+ password: redfish_pwd
+ snmp:
+ community: snmp_community
+ - network_address_detail:
+ - 192.96.25.1-192.96.25.255
+ - ipmihost
+ - esxiserver
+ - sshserver
+ device_types:
+ - SERVER
+ ssh:
+ username: ssh_user
+ password: ssh_pwd
+ vmware:
+ username: vm_user
+ password: vmware_pwd
+ ipmi:
+ username: ipmi_user
+ password: ipmi_pwd
+ schedule: RunLater
+ cron: "0 0 9 ? * MON,WED,FRI *"
+ ignore_partial_failure: True
+ trap_destination: True
+ community_string: True
+ email_recipient: test_email@company.com
+
+- name: Discover servers with ca check enabled
+ dellemc.openmanage.ome_discovery:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ discovery_job_name: "Discovery_server_ca1"
+ discovery_config_targets:
+ - network_address_detail:
+ - 192.96.24.108
+ device_types:
+ - SERVER
+ wsman:
+ username: user
+ password: password
+ ca_check: True
+ certificate_data: "{{ lookup('ansible.builtin.file', '/path/to/certificate_data_file') }}"
+
+- name: Discover chassis with ca check enabled data
+ dellemc.openmanage.ome_discovery:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ discovery_job_name: "Discovery_chassis_ca1"
+ discovery_config_targets:
+ - network_address_detail:
+ - 192.96.24.108
+ device_types:
+ - CHASSIS
+ redfish:
+ username: user
+ password: password
+ ca_check: True
+ certificate_data: "-----BEGIN CERTIFICATE-----\r\n
+ ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n
+ ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n
+ ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n
+ aqwertyuiopasdfghjklzxcvbnmasdasagasvv=\r\n
+ -----END CERTIFICATE-----"
+'''
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of the discovery operation.
+ returned: always
+ type: str
+ sample: "Successfully deleted 1 discovery job(s)."
+discovery_status:
+ description:
+ - Details of the discovery job created or modified.
+ - If I(job_wait) is true, Completed and Failed IPs are also listed.
+ returned: when I(state) is C(present)
+ type: dict
+ sample: {
+ "Completed": [
+ "192.168.24.17",
+ "192.168.24.20",
+ "192.168.24.22"
+ ],
+ "Failed": [
+ "192.168.24.15",
+ "192.168.24.16",
+ "192.168.24.18",
+ "192.168.24.19",
+ "192.168.24.21",
+ "host123"
+ ],
+ "DiscoveredDevicesByType": [
+ {
+ "Count": 3,
+ "DeviceType": "SERVER"
+ }
+ ],
+ "DiscoveryConfigDiscoveredDeviceCount": 3,
+ "DiscoveryConfigEmailRecipient": "myemail@dell.com",
+ "DiscoveryConfigExpectedDeviceCount": 9,
+ "DiscoveryConfigGroupId": 125,
+ "JobDescription": "D1",
+ "JobEnabled": true,
+ "JobEndTime": "2021-01-01 06:27:29.99",
+ "JobId": 12666,
+ "JobName": "D1",
+ "JobNextRun": null,
+ "JobProgress": "100",
+ "JobSchedule": "startnow",
+ "JobStartTime": "2021-01-01 06:24:10.071",
+ "JobStatusId": 2090,
+ "LastUpdateTime": "2021-01-01 06:27:30.001",
+ "UpdatedBy": "admin"
+ }
+discovery_ids:
+ description: IDs of the discoveries with duplicate names.
+ returned: when discoveries with duplicate name exist for I(state) is C(present)
+ type: list
+ sample: [1234, 5678]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+import time
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+# OME REST API endpoints used by the discovery workflows.
+CONFIG_GROUPS_URI = "DiscoveryConfigService/DiscoveryConfigGroups"
+DISCOVERY_JOBS_URI = "DiscoveryConfigService/Jobs"
+DELETE_JOB_URI = "DiscoveryConfigService/Actions/DiscoveryConfigService.RemoveDiscoveryGroup"
+PROTOCOL_DEVICE = "DiscoveryConfigService/ProtocolToDeviceType"
+JOB_EXEC_HISTORY = "JobService/Jobs({job_id})/ExecutionHistories"
+CONFIG_GROUPS_ID_URI = "DiscoveryConfigService/DiscoveryConfigGroups({group_id})"
+# User-facing status and error message templates.
+NO_CHANGES_MSG = "No changes found to be applied."
+DISC_JOB_RUNNING = "Discovery job '{name}' with ID {id} is running. Please retry after job completion."
+DISC_DEL_JOBS_SUCCESS = "Successfully deleted {n} discovery job(s)."
+MULTI_DISCOVERY = "Multiple discoveries present. Run the job again using a specific ID."
+DISCOVERY_SCHEDULED = "Successfully scheduled the Discovery job."
+DISCOVER_JOB_COMPLETE = "Successfully completed the Discovery job."
+JOB_TRACK_SUCCESS = "Discovery job has {0}."
+JOB_TRACK_FAIL = "No devices discovered, job is in {0} state."
+JOB_TRACK_UNABLE = "Unable to track discovery job status of {0}."
+JOB_TRACK_INCOMPLETE = "Discovery job {0} incomplete after polling {1} times."
+INVALID_DEVICES = "Invalid device types found - {0}."
+DISCOVERY_PARTIAL = "Some IPs are not discovered."
+ATLEAST_ONE_PROTOCOL = "Protocol not applicable for given device types."
+INVALID_DISCOVERY_ID = "Invalid discovery ID provided."
+# Seconds to pause before polling, letting OME settle after a job submission.
+SETTLING_TIME = 5
+
+
+def check_existing_discovery(module, rest_obj):
+    """Return the discovery config group(s) matching the module's identifier.
+
+    Looks up by ``discovery_id`` when provided (at most one match), otherwise
+    by ``discovery_job_name`` (duplicate names can return several matches).
+    """
+    discovery_cfgs = []
+    discovery_id = module.params.get("discovery_id")
+    srch_key = "DiscoveryConfigGroupName"
+    srch_val = module.params.get("discovery_job_name")
+    if discovery_id:
+        # An explicit ID takes precedence over the (possibly duplicated) name.
+        srch_key = "DiscoveryConfigGroupId"
+        srch_val = module.params.get("discovery_id")
+    resp = rest_obj.invoke_request('GET', CONFIG_GROUPS_URI + "?$top=9999")
+    discovs = resp.json_data.get("value")
+    for d in discovs:
+        if d[srch_key] == srch_val:
+            discovery_cfgs.append(d)
+            if discovery_id:
+                # IDs are unique; stop at the first match.
+                break
+    return discovery_cfgs
+
+
+def get_discovery_states(rest_obj, key="JobStatusId"):
+ resp = rest_obj.invoke_request('GET', DISCOVERY_JOBS_URI)
+ disc_jobs = resp.json_data.get("value")
+ job_state_dict = dict([(item["DiscoveryConfigGroupId"], item[key]) for item in disc_jobs])
+ return job_state_dict
+
+
+def get_protocol_device_map(rest_obj):
+    """Build protocol and ID lookup tables from the ProtocolToDeviceType API.
+
+    :return: tuple ``(prot_dev_map, dev_id_map)`` where ``prot_dev_map`` maps a
+        device-type name to the protocols it supports and ``dev_id_map`` maps
+        the name to its numeric device-type ID.  "DELL STORAGE" is aliased as
+        "STORAGE" so the module's user-facing device type resolves as well.
+    """
+    prot_dev_map = {}
+    dev_id_map = {}
+    resp = rest_obj.invoke_request('GET', PROTOCOL_DEVICE)
+    prot_dev = resp.json_data.get('value')
+    for item in prot_dev:
+        dname = item["DeviceTypeName"]
+        dlist = prot_dev_map.get(dname, [])
+        dlist.append(item["ProtocolName"])
+        prot_dev_map[dname] = dlist
+        dev_id_map[dname] = item["DeviceTypeId"]
+        if dname == "DELL STORAGE":
+            # Alias to the user-facing name; shares the same protocol list object.
+            prot_dev_map['STORAGE'] = dlist
+            dev_id_map['STORAGE'] = item["DeviceTypeId"]
+    return prot_dev_map, dev_id_map
+
+
+def get_other_discovery_payload(module):
+ trans_dict = {'discovery_job_name': "DiscoveryConfigGroupName",
+ 'trap_destination': "TrapDestination",
+ 'community_string': "CommunityString",
+ 'email_recipient': "DiscoveryStatusEmailRecipient"}
+ other_dict = {}
+ for key, val in trans_dict.items():
+ if module.params.get(key) is not None:
+ other_dict[val] = module.params.get(key)
+ return other_dict
+
+
+def get_schedule(module):
+ schedule_payload = {}
+ schedule = module.params.get('schedule')
+ if not schedule or schedule == 'RunNow':
+ schedule_payload['RunNow'] = True
+ schedule_payload['RunLater'] = False
+ schedule_payload['Cron'] = 'startnow'
+ else:
+ schedule_payload['RunNow'] = False
+ schedule_payload['RunLater'] = True
+ schedule_payload['Cron'] = module.params.get('cron')
+ return schedule_payload
+
+
+def get_execution_details(module, rest_obj, job_id):
+    """Collect per-target results from the latest execution of a discovery job.
+
+    :return: dict keyed by execution status name (always containing at least
+        "Completed" and "Failed"), each holding the list of target keys
+        (IPs/hostnames) that finished in that state.
+    NOTE(review): ``module`` is unused here; kept for signature consistency
+    with sibling helpers.
+    """
+    try:
+        resp = rest_obj.invoke_request('GET', JOB_EXEC_HISTORY.format(job_id=job_id))
+        ex_hist = resp.json_data.get('value')
+        # Sorting based on startTime and to get latest execution instance.
+        tmp_dict = dict((x["StartTime"], x["Id"]) for x in ex_hist)
+        sorted_dates = sorted(tmp_dict.keys())
+        ex_url = JOB_EXEC_HISTORY.format(job_id=job_id) + "({0})/ExecutionHistoryDetails".format(tmp_dict[sorted_dates[-1]])
+        ips = {"Completed": [], "Failed": []}
+        all_exec = rest_obj.get_all_items_with_pagination(ex_url)
+        for jb_ip in all_exec.get('value'):
+            jobstatus = jb_ip.get('JobStatus', {}).get('Name', 'Unknown')
+            jlist = ips.get(jobstatus, [])
+            jlist.append(jb_ip.get('Key'))
+            ips[jobstatus] = jlist
+    except Exception:
+        # Deliberate best-effort: execution details are informational only,
+        # so any failure degrades to empty result lists instead of erroring.
+        ips = {"Completed": [], "Failed": []}
+    return ips
+
+
+def discovery_job_tracking(rest_obj, job_id, job_wait_sec):
+    """Poll a discovery job until a terminal state or the wait budget expires.
+
+    :param job_wait_sec: total time budget in seconds; the job is polled every
+        30 seconds within this budget.
+    :return: tuple ``(job_failed, message)``.
+    NOTE(review): when job_wait_sec < 30, max_retries is 0 and the loop never
+    polls -- the job is immediately reported incomplete; confirm intended.
+    """
+    # Map of OME job status IDs to human-readable phrases used in messages.
+    job_status_map = {
+        2020: "Scheduled", 2030: "Queued", 2040: "Starting", 2050: "Running", 2060: "completed successfully",
+        2070: "Failed", 2090: "completed with errors", 2080: "New", 2100: "Aborted", 2101: "Paused", 2102: "Stopped",
+        2103: "Canceled"
+    }
+    sleep_interval = 30
+    max_retries = job_wait_sec // sleep_interval
+    failed_job_status = [2070, 2100, 2101, 2102, 2103]
+    # "Scheduled" (2020) and "completed with errors" (2090) also count as success here.
+    success_job_status = [2060, 2020, 2090]
+    job_url = (DISCOVERY_JOBS_URI + "({job_id})").format(job_id=job_id)
+    loop_ctr = 0
+    job_failed = True
+    time.sleep(SETTLING_TIME)
+    while loop_ctr < max_retries:
+        loop_ctr += 1
+        try:
+            job_resp = rest_obj.invoke_request('GET', job_url)
+            job_dict = job_resp.json_data
+            job_status = job_dict['JobStatusId']
+            if job_status in success_job_status:
+                job_failed = False
+                return job_failed, JOB_TRACK_SUCCESS.format(job_status_map[job_status])
+            elif job_status in failed_job_status:
+                job_failed = True
+                return job_failed, JOB_TRACK_FAIL.format(job_status_map[job_status])
+            time.sleep(sleep_interval)
+        except HTTPError:
+            return job_failed, JOB_TRACK_UNABLE.format(job_id)
+        except Exception as err:
+            return job_failed, str(err)
+    return job_failed, JOB_TRACK_INCOMPLETE.format(job_id, max_retries)
+
+
+def get_job_data(discovery_json, rest_obj):
+ job_list = discovery_json['DiscoveryConfigTaskParam']
+ if len(job_list) == 1:
+ job_id = job_list[0].get('TaskId')
+ else:
+ srch_key = 'DiscoveryConfigGroupId'
+ srch_val = discovery_json[srch_key]
+ resp = rest_obj.invoke_request('GET', DISCOVERY_JOBS_URI + "?$top=9999")
+ discovs = resp.json_data.get("value")
+ for d in discovs:
+ if d[srch_key] == srch_val:
+ job_id = d['JobId']
+ break
+ return job_id
+
+
+def get_connection_profile(disc_config):
+    """Build the OME ConnectionProfile structure from one discovery target config.
+
+    Each protocol block present in ``disc_config`` is converted to camelCase
+    credentials and merged with protocol-specific defaults.  When ``wsman`` is
+    given and ``redfish`` is not, the wsman credentials are duplicated for
+    REDFISH, matching the OME GUI behavior.
+    """
+    # Protocol-specific default fields merged into each credentials payload.
+    proto_add_dict = {
+        'wsman': {
+            'certificateDetail': None,
+            'isHttp': False,
+            'keepAlive': True,
+            # 'version': None
+        },
+        'redfish': {'certificateDetail': None, 'isHttp': False, 'keepAlive': True},
+        'snmp': {
+            # 'authenticationPassphrase': None,
+            # 'authenticationProtocol': None,
+            'enableV1V2': True,
+            'enableV3': False,
+            # 'localizationEngineID': None,
+            # 'privacyPassphrase': None,
+            # 'privacyProtocol': None,
+            # 'securityName': None
+        },
+        'vmware': {'certificateDetail': None, 'isHttp': False, 'keepAlive': False},
+        'ssh': {'useKey': False, 'key': None, 'knownHostKey': None, 'passphrase': None},
+        'ipmi': {'privilege': 2},
+        'storage': {
+            'certificateDetail': None,
+            'isHttp': False,
+            'keepAlive': True,
+            # 'version': None
+        }
+    }
+    proto_list = ['wsman', 'snmp', 'vmware', 'ssh', 'ipmi', 'redfish', 'storage']
+    conn_profile = {"profileId": 0, "profileName": "", "profileDescription": "", "type": "DISCOVERY"}
+    creds_dict = {}
+    for p in proto_list:
+        if disc_config.get(p):
+            xproto = {"type": p.upper(),
+                      "authType": "Basic",
+                      "modified": False}
+            xproto['credentials'] = snake_dict_to_camel_dict(disc_config[p])
+            (xproto['credentials']).update(proto_add_dict.get(p, {}))
+            creds_dict[p] = xproto
+            # Special handling, duplicating wsman to redfish as in GUI
+            # (an explicit 'redfish' entry later in proto_list overwrites this copy).
+            if p == 'wsman':
+                rf = xproto.copy()
+                rf['type'] = 'REDFISH'
+                creds_dict['redfish'] = rf
+    conn_profile['credentials'] = list(creds_dict.values())
+    return conn_profile
+
+
+def get_discovery_config(module, rest_obj):
+    """Translate ``discovery_config_targets`` into OME DiscoveryConfigModels.
+
+    Validates each target's device types against the appliance's known types
+    and requires at least one applicable protocol per target; fails the module
+    otherwise.
+    """
+    disc_cfg_list = []
+    proto_dev_map, dev_id_map = get_protocol_device_map(rest_obj)
+    discovery_config_list = module.params.get("discovery_config_targets")
+    for disc_config in discovery_config_list:
+        disc_cfg = {}
+        disc_cfg['DeviceType'] = list(
+            dev_id_map[dev] for dev in disc_config.get('device_types') if dev in dev_id_map.keys())
+        devices = list(set(disc_config.get('device_types')))
+        if len(devices) != len(disc_cfg['DeviceType']):
+            # Some requested device types are unknown to the appliance.
+            invalid_dev = set(devices) - set(dev_id_map.keys())
+            module.fail_json(msg=INVALID_DEVICES.format(','.join(invalid_dev)))
+        disc_cfg["DiscoveryConfigTargets"] = list({"NetworkAddressDetail": ip} for ip in disc_config["network_address_detail"])
+        conn_profile = get_connection_profile(disc_config)
+        given_protos = list(x["type"] for x in conn_profile['credentials'])
+        req_protos = []
+        for dev in disc_config.get('device_types'):
+            proto_dev_value = proto_dev_map.get(dev, [])
+            req_protos.extend(proto_dev_value)
+        if not (set(req_protos) & set(given_protos)):
+            # None of the supplied protocols apply to the chosen device types.
+            module.fail_json(msg=ATLEAST_ONE_PROTOCOL, discovery_status=proto_dev_map)
+        # The API expects the connection profile as a JSON-encoded string.
+        disc_cfg["ConnectionProfile"] = json.dumps(conn_profile)
+        disc_cfg_list.append(disc_cfg)
+    return disc_cfg_list
+
+
+def get_discovery_job(rest_obj, job_id):
+ resp = rest_obj.invoke_request('GET', DISCOVERY_JOBS_URI + "({0})".format(job_id))
+ djob = resp.json_data
+ nlist = list(djob)
+ for k in nlist:
+ if str(k).lower().startswith('@odata'):
+ djob.pop(k)
+ return djob
+
+
+def exit_discovery(module, rest_obj, job_id):
+    """Finalize the module run after a discovery job has been submitted.
+
+    When ``job_wait`` is set and the schedule is ``RunNow``, tracks the job to
+    completion, attaches per-IP results, and fails on partial failure unless
+    ``ignore_partial_failure`` is set.  Otherwise reports the job as scheduled.
+    Always exits the module (exit_json/fail_json); never returns.
+    """
+    msg = DISCOVERY_SCHEDULED
+    time.sleep(SETTLING_TIME)
+    djob = get_discovery_job(rest_obj, job_id)
+    if module.params.get("job_wait") and module.params.get('schedule') == 'RunNow':
+        job_failed, job_message = discovery_job_tracking(rest_obj, job_id,
+                                                         job_wait_sec=module.params["job_wait_timeout"])
+        if job_failed is True:
+            djob.update({"Completed": [], "Failed": []})
+            module.fail_json(msg=job_message, discovery_status=djob)
+        msg = job_message
+        ip_details = get_execution_details(module, rest_obj, job_id)
+        # Re-fetch the job so the reported status reflects the finished run.
+        djob = get_discovery_job(rest_obj, job_id)
+        djob.update(ip_details)
+        if ip_details.get("Failed") and module.params.get("ignore_partial_failure") is False:
+            module.fail_json(msg=DISCOVERY_PARTIAL, discovery_status=djob)
+    module.exit_json(msg=msg, discovery_status=djob, changed=True)
+
+
+def create_discovery(module, rest_obj):
+    """Create a new discovery config group and exit via exit_discovery().
+
+    Assembles the targets, schedule, and scalar fields into one POST payload,
+    then resolves and (optionally) tracks the resulting job.
+    """
+    discovery_payload = {}
+    discovery_payload['DiscoveryConfigModels'] = get_discovery_config(module, rest_obj)
+    discovery_payload['Schedule'] = get_schedule(module)
+    other_params = get_other_discovery_payload(module)
+    discovery_payload.update(other_params)
+    resp = rest_obj.invoke_request("POST", CONFIG_GROUPS_URI, data=discovery_payload)
+    job_id = get_job_data(resp.json_data, rest_obj)
+    exit_discovery(module, rest_obj, job_id)
+
+
+def update_modify_payload(discovery_modify_payload, current_payload, new_name=None):
+    """Fill gaps in a modify payload from the existing discovery config (in place).
+
+    Top-level fields and the schedule are carried over from ``current_payload``
+    when the caller did not supply them; ``new_name`` (if given) overrides the
+    group name.  Mutates ``discovery_modify_payload``; returns None.
+    """
+    parent_items = ["DiscoveryConfigGroupName",
+                    "TrapDestination",
+                    "CommunityString",
+                    "DiscoveryStatusEmailRecipient",
+                    "CreateGroup",
+                    "UseAllProfiles"]
+    for key in parent_items:
+        if key not in discovery_modify_payload and key in current_payload:
+            discovery_modify_payload[key] = current_payload[key]
+    if not discovery_modify_payload.get("Schedule"):
+        # Preserve the existing schedule, normalizing it to the full payload shape.
+        exist_schedule = current_payload.get("Schedule", {})
+        schedule_payload = {}
+        if exist_schedule.get('Cron') == 'startnow':
+            schedule_payload['RunNow'] = True
+            schedule_payload['RunLater'] = False
+            schedule_payload['Cron'] = 'startnow'
+        else:
+            schedule_payload['RunNow'] = False
+            schedule_payload['RunLater'] = True
+            schedule_payload['Cron'] = exist_schedule.get('Cron')
+        discovery_modify_payload['Schedule'] = schedule_payload
+    discovery_modify_payload["DiscoveryConfigGroupId"] = current_payload["DiscoveryConfigGroupId"]
+    if new_name:
+        discovery_modify_payload["DiscoveryConfigGroupName"] = new_name
+
+
+def modify_discovery(module, rest_obj, discov_list):
+    """Modify an existing discovery config group and exit via exit_discovery().
+
+    Fails when the name matches multiple groups (ambiguous) or when the
+    targeted discovery job is currently running (status 2050).
+    """
+    if len(discov_list) > 1:
+        dup_discovery = list(item["DiscoveryConfigGroupId"] for item in discov_list)
+        module.fail_json(msg=MULTI_DISCOVERY, discovery_ids=dup_discovery)
+    job_state_dict = get_discovery_states(rest_obj)
+    for d in discov_list:
+        # 2050 = Running; refuse to modify a job mid-run.
+        if job_state_dict.get(d["DiscoveryConfigGroupId"]) == 2050:
+            module.fail_json(
+                msg=DISC_JOB_RUNNING.format(name=d["DiscoveryConfigGroupName"], id=d["DiscoveryConfigGroupId"]))
+    discovery_payload = {'DiscoveryConfigModels': get_discovery_config(module, rest_obj),
+                         'Schedule': get_schedule(module)}
+    other_params = get_other_discovery_payload(module)
+    discovery_payload.update(other_params)
+    update_modify_payload(discovery_payload, discov_list[0], module.params.get("new_name"))
+    resp = rest_obj.invoke_request("PUT",
+                                   CONFIG_GROUPS_ID_URI.format(group_id=discovery_payload["DiscoveryConfigGroupId"]),
+                                   data=discovery_payload)
+    job_id = get_job_data(resp.json_data, rest_obj)
+    exit_discovery(module, rest_obj, job_id)
+
+
+def delete_discovery(module, rest_obj, discov_list):
+    """Delete the given discovery config groups and exit the module.
+
+    Fails fast if any targeted discovery job is currently running (status 2050).
+    """
+    job_state_dict = get_discovery_states(rest_obj)
+    delete_ids = []
+    for d in discov_list:
+        # 2050 = Running; a running job cannot be deleted.
+        if job_state_dict.get(d["DiscoveryConfigGroupId"]) == 2050:
+            module.fail_json(msg=DISC_JOB_RUNNING.format(name=d["DiscoveryConfigGroupName"],
+                                                         id=d["DiscoveryConfigGroupId"]))
+        else:
+            delete_ids.append(d["DiscoveryConfigGroupId"])
+    delete_payload = {"DiscoveryGroupIds": delete_ids}
+    rest_obj.invoke_request('POST', DELETE_JOB_URI, data=delete_payload)
+    module.exit_json(msg=DISC_DEL_JOBS_SUCCESS.format(n=len(delete_ids)), changed=True)
+
+
+def main():
+    """Module entry point: parse arguments and dispatch create/modify/delete."""
+    # Shared credential spec for HTTP-based protocols (wsman/redfish/vmware/storage).
+    http_creds = {"username": {"type": 'str', "required": True},
+                  "password": {"type": 'str', "required": True, "no_log": True},
+                  "domain": {"type": 'str'},
+                  "retries": {"type": 'int', "default": 3},
+                  "timeout": {"type": 'int', "default": 60},
+                  "port": {"type": 'int', "default": 443},
+                  "cn_check": {"type": 'bool', "default": False},
+                  "ca_check": {"type": 'bool', "default": False},
+                  "certificate_data": {"type": 'str', "no_log": True}
+                  }
+    snmp_creds = {"community": {"type": 'str', "required": True},
+                  "retries": {"type": 'int', "default": 3},
+                  "timeout": {"type": 'int', "default": 3},
+                  "port": {"type": 'int', "default": 161},
+                  }
+    ssh_creds = {"username": {"type": 'str', "required": True},
+                 "password": {"type": 'str', "required": True, "no_log": True},
+                 "retries": {"type": 'int', "default": 3},
+                 "timeout": {"type": 'int', "default": 60},
+                 "port": {"type": 'int', "default": 22},
+                 "check_known_hosts": {"type": 'bool', "default": False},
+                 "is_sudo_user": {"type": 'bool', "default": False}
+                 }
+    ipmi_creds = {"username": {"type": 'str', "required": True},
+                  "password": {"type": 'str', "required": True, "no_log": True},
+                  "retries": {"type": 'int', "default": 3},
+                  "timeout": {"type": 'int', "default": 60},
+                  "kgkey": {"type": 'str', "no_log": True}
+                  }
+    # Spec for each entry of discovery_config_targets; certificate_data becomes
+    # mandatory whenever the corresponding protocol enables ca_check.
+    DiscoveryConfigModel = {"device_types": {"required": True, 'type': 'list', "elements": 'str'},
+                            "network_address_detail": {"required": True, "type": 'list', "elements": 'str'},
+                            "wsman": {"type": 'dict', "options": http_creds,
+                                      "required_if": [['ca_check', True, ('certificate_data',)]]},
+                            "storage": {"type": 'dict', "options": http_creds,
+                                        "required_if": [['ca_check', True, ('certificate_data',)]]},
+                            "redfish": {"type": 'dict', "options": http_creds,
+                                        "required_if": [['ca_check', True, ('certificate_data',)]]},
+                            "vmware": {"type": 'dict', "options": http_creds,
+                                       "required_if": [['ca_check', True, ('certificate_data',)]]},
+                            "snmp": {"type": 'dict', "options": snmp_creds},
+                            "ssh": {"type": 'dict', "options": ssh_creds},
+                            "ipmi": {"type": 'dict', "options": ipmi_creds},
+                            }
+    specs = {
+        "discovery_job_name": {"type": 'str'},
+        "discovery_id": {"type": 'int'},
+        "state": {"default": "present", "choices": ['present', 'absent']},
+        "new_name": {"type": 'str'},
+        "discovery_config_targets":
+            {"type": 'list', "elements": 'dict', "options": DiscoveryConfigModel,
+             "required_one_of": [
+                 ('wsman', 'storage', 'redfish', 'vmware', 'snmp', 'ssh', 'ipmi')
+             ]},
+        "schedule": {"default": 'RunNow', "choices": ['RunNow', 'RunLater']},
+        "cron": {"type": 'str'},
+        "job_wait": {"type": 'bool', "default": True},
+        "job_wait_timeout": {"type": 'int', "default": 10800},
+        "trap_destination": {"type": 'bool', "default": False},
+        "community_string": {"type": 'bool', "default": False},
+        "email_recipient": {"type": 'str'},
+        "ignore_partial_failure": {"type": 'bool', "default": False}
+    }
+    specs.update(ome_auth_params)
+    module = AnsibleModule(
+        argument_spec=specs,
+        required_if=[
+            ['state', 'present', ('discovery_config_targets',)],
+            ['schedule', 'RunLater', ('cron',)]
+        ],
+        required_one_of=[('discovery_job_name', 'discovery_id')],
+        mutually_exclusive=[('discovery_job_name', 'discovery_id')],
+        supports_check_mode=False
+    )
+    try:
+        with RestOME(module.params, req_session=True) as rest_obj:
+            discov_list = check_existing_discovery(module, rest_obj)
+            if module.params.get('state') == 'absent':
+                if discov_list:
+                    delete_discovery(module, rest_obj, discov_list)
+                # Nothing matched: deletion is a no-op.
+                module.exit_json(msg=NO_CHANGES_MSG)
+            else:
+                if discov_list:
+                    modify_discovery(module, rest_obj, discov_list)
+                else:
+                    # A discovery_id that matched nothing cannot be created anew.
+                    if module.params.get('discovery_id'):
+                        module.fail_json(msg=INVALID_DISCOVERY_ID)
+                    create_discovery(module, rest_obj)
+    except HTTPError as err:
+        module.fail_json(msg=str(err), error_info=json.load(err))
+    except URLError as err:
+        # Unreachable appliance is reported without marking the task failed.
+        module.exit_json(msg=str(err), unreachable=True)
+    except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
+        module.fail_json(msg=str(err))
+
+
+# Standard script entry point: run the module only when executed directly.
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py
new file mode 100644
index 000000000..7b74c306e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: ome_domain_user_groups
+short_description: Create, modify, or delete an Active Directory user group on
+ OpenManage Enterprise and OpenManage Enterprise Modular
+version_added: "4.0.0"
+description: This module allows to create, modify, or delete an Active Directory user group on
+ OpenManage Enterprise and OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ state:
+ type: str
+ description:
+ - C(present) imports or modifies the Active Directory user group.
+ - C(absent) deletes an existing Active Directory user group.
+ choices: [present, absent]
+ default: present
+ group_name:
+ type: str
+ required: True
+ description:
+ - The desired Active Directory user group name to be imported or removed.
+ - "Examples for user group name: Administrator or Account Operators or Access Control Assistance Operator."
+ - I(group_name) value is case insensitive.
+ role:
+ type: str
+ description:
+ - The desired roles and privilege for the imported Active Directory user group.
+ - "OpenManage Enterprise Modular Roles: CHASSIS ADMINISTRATOR, COMPUTE MANAGER, STORAGE MANAGER,
+ FABRIC MANAGER, VIEWER."
+ - "OpenManage Enterprise Roles: ADMINISTRATOR, DEVICE MANAGER, VIEWER."
+ - I(role) value is case insensitive.
+ directory_name:
+ type: str
+ description:
+ - The directory name set while adding the Active Directory.
+ - I(directory_name) is mutually exclusive with I(directory_id).
+ directory_id:
+ type: int
+ description:
+ - The ID of the Active Directory.
+ - I(directory_id) is mutually exclusive with I(directory_name).
+ domain_username:
+ type: str
+ description:
+ - Active directory domain username.
+ - "Example: username@domain or domain\\username."
+ domain_password:
+ type: str
+ description:
+ - Active directory domain password.
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+notes:
+ - This module supports C(check_mode) and idempotency.
+ - Run this module from a system that has direct access to OpenManage Enterprise
+ or OpenManage Enterprise Modular.
+"""
+
+EXAMPLES = r"""
+---
+- name: Create Active Directory user group
+ dellemc.openmanage.ome_domain_user_groups:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: present
+ group_name: account operators
+ directory_name: directory_name
+ role: administrator
+ domain_username: username@domain
+ domain_password: domain_password
+
+- name: Update Active Directory user group
+ dellemc.openmanage.ome_domain_user_groups:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: present
+ group_name: account operators
+ role: viewer
+
+- name: Delete active directory user group
+ dellemc.openmanage.ome_domain_user_groups:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: absent
+ group_name: administrators
+"""
+
+RETURN = r"""
+---
+msg:
+ type: str
+ description: Overall status of the Active Directory user group operation.
+ returned: always
+ sample: Successfully imported the active directory user group.
+domain_user_status:
+ description: Details of the domain user operation, when I(state) is C(present).
+ returned: When I(state) is C(present).
+ type: dict
+ sample: {
+ "Description": null,
+ "DirectoryServiceId": 16097,
+ "Enabled": true,
+ "Id": "16617",
+ "IsBuiltin": false,
+ "IsVisible": true,
+ "Locked": false,
+ "Name": "Account Operators",
+ "ObjectGuid": "a491859c-031e-42a3-ae5e-0ab148ecf1d6",
+ "ObjectSid": null,
+ "Oem": null,
+ "Password": null,
+ "PlainTextPassword": null,
+ "RoleId": "16",
+ "UserName": "Account Operators",
+ "UserTypeId": 2
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+"""
+
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+ROLE_URI = "AccountService/Roles"
+ACCOUNT_URI = "AccountService/Accounts"
+GET_AD_ACC = "AccountService/ExternalAccountProvider/ADAccountProvider"
+IMPORT_ACC_PRV = "AccountService/Actions/AccountService.ImportExternalAccountProvider"
+SEARCH_AD = "AccountService/ExternalAccountProvider/Actions/ExternalAccountProvider.SearchGroups"
+NO_CHANGES_MSG = "No changes found to be applied."
+CHANGES_FOUND = "Changes found to be applied."
+
+
def get_directory(module, rest_obj):
    """Resolve the Active Directory provider ID from directory_name or directory_id.

    Fails the module when neither selector is supplied or when no provider
    matches the supplied selector.
    """
    dir_name = module.params.get("directory_name")
    dir_ident = module.params.get("directory_id")
    if dir_name is None and dir_ident is None:
        module.fail_json(msg="missing required arguments: directory_name or directory_id")
    # Remember which selector the user supplied, for the error message below.
    if dir_name is not None:
        key, value = "name", dir_name
    else:
        key, value = "id", dir_ident
    response = rest_obj.invoke_request("GET", GET_AD_ACC)
    for entry in response.json_data["value"]:
        name_match = dir_name is not None and entry["Name"] == dir_name
        id_match = dir_ident is not None and entry["Id"] == dir_ident
        if name_match or id_match:
            return entry["Id"]
    module.fail_json(msg="Unable to complete the operation because the entered "
                         "directory {0} '{1}' does not exist.".format(key, value))
    return None
+
+
def search_directory(module, rest_obj, dir_id):
    """Search the Active Directory for the configured group name.

    Returns a tuple ``(object_guid, common_name)`` of the matching group.
    Fails the module when the group does not exist or when the domain
    credentials are rejected (MessageId CGEN1004 / CSEC5022).
    """
    group_name, obj_gui_id, common_name = module.params["group_name"], None, None
    payload = {"DirectoryServerId": dir_id, "Type": "AD",
               "UserName": module.params["domain_username"],
               "Password": module.params["domain_password"],
               "CommonName": group_name}
    try:
        resp = rest_obj.invoke_request("POST", SEARCH_AD, data=payload)
        # Group-name comparison is case-insensitive.
        for ad in resp.json_data:
            if ad["CommonName"].lower() == group_name.lower():
                obj_gui_id = ad["ObjectGuid"]
                common_name = ad["CommonName"]
                break
        else:
            module.fail_json(msg="Unable to complete the operation because the entered "
                                 "group name '{0}' does not exist.".format(group_name))
    except HTTPError as err:
        error = json.load(err)
        if error['error']['@Message.ExtendedInfo'][0]['MessageId'] in ["CGEN1004", "CSEC5022"]:
            module.fail_json(msg="Unable to complete the operation because the entered "
                                 "domain username or domain password are invalid.")
        # BUGFIX: any other HTTP error was previously swallowed, making the
        # function return (None, None) and later import a group with
        # ObjectGuid=None. Re-raise so main() reports the real failure.
        raise
    return obj_gui_id, common_name
+
+
def directory_user(module, rest_obj):
    """Import a new AD user group or update the role of an existing one.

    Returns a tuple ``(response_json_data, action)`` where action is
    'imported' or 'updated'. Exits early (module.exit_json) when no change
    is needed or when running in check mode.
    """
    user = get_directory_user(module, rest_obj)
    new_role_id = get_role(module, rest_obj)
    dir_id = get_directory(module, rest_obj)
    domain_resp, msg = None, ''
    if user is None:
        # Group not imported yet: look it up in the directory and import it.
        obj_gui_id, common_name = search_directory(module, rest_obj, dir_id)
        if module.check_mode:
            module.exit_json(msg=CHANGES_FOUND, changed=True)
        # The import action expects a list of account payloads.
        payload = [
            {"UserTypeId": 2, "DirectoryServiceId": dir_id, "Description": None,
             "Name": common_name, "Password": "", "UserName": common_name, "RoleId": new_role_id, "Locked": False,
             "IsBuiltin": False, "Enabled": True, "ObjectGuid": obj_gui_id}
        ]
        domain_resp = rest_obj.invoke_request("POST", IMPORT_ACC_PRV, data=payload)
        msg = 'imported'
    else:
        if (int(user["RoleId"]) == new_role_id):
            # Role already matches the requested one: idempotent no-op.
            user = rest_obj.strip_substr_dict(user)
            module.exit_json(msg=NO_CHANGES_MSG, domain_user_status=user)
        else:
            # Role differs: update the existing account in place.
            payload = {"Id": str(user["Id"]), "UserTypeId": 2, "DirectoryServiceId": dir_id,
                       "UserName": user["UserName"], "RoleId": str(new_role_id), "Enabled": user["Enabled"]}
            update_uri = "{0}('{1}')".format(ACCOUNT_URI, user['Id'])
            if module.check_mode:
                module.exit_json(msg=CHANGES_FOUND, changed=True, domain_user_status=payload)
            domain_resp = rest_obj.invoke_request("PUT", update_uri, data=payload)
            msg = 'updated'
    if domain_resp is None:
        module.fail_json(msg="Unable to complete the Active Directory user account.")
    return domain_resp.json_data, msg
+
+
def get_role(module, rest_obj):
    """Return the numeric role ID matching the (case-insensitive) role option."""
    role_name = module.params.get("role")
    if role_name is None:
        module.fail_json(msg="missing required arguments: role")
    # OME stores role names upper-cased with underscores, e.g. "DEVICE_MANAGER".
    wanted = role_name.upper().replace(" ", "_")
    roles_resp = rest_obj.invoke_request("GET", ROLE_URI)
    for entry in roles_resp.json_data["value"]:
        if entry["Name"] == wanted:
            return int(entry["Id"])
    module.fail_json(msg="Unable to complete the operation because the entered "
                         "role name '{0}' does not exist.".format(role_name))
    return None
+
+
def get_directory_user(module, rest_obj):
    """Find an already-imported AD user group account.

    Returns the matching account dict or None. For state=absent it exits
    early: CHANGES_FOUND in check mode when the account exists, or
    NO_CHANGES_MSG when it does not.
    """
    user_group_name, user = module.params.get("group_name"), None
    state = module.params["state"]
    if user_group_name is None:
        module.fail_json(msg="missing required arguments: group_name")
    user_resp = rest_obj.invoke_request('GET', ACCOUNT_URI)
    for usr in user_resp.json_data["value"]:
        # Name comparison is case-insensitive; UserTypeId 2 marks AD accounts.
        if usr["UserName"].lower() == user_group_name.lower() and usr["UserTypeId"] == 2:
            user = usr
            if module.check_mode and state == "absent":
                user = rest_obj.strip_substr_dict(usr)
                module.exit_json(msg=CHANGES_FOUND, changed=True, domain_user_status=user)
            break
    else:
        # No matching account: deletion is a no-op.
        if state == "absent":
            module.exit_json(msg=NO_CHANGES_MSG)
    return user
+
+
def delete_directory_user(rest_obj, user_id):
    """Delete the imported AD user group account and report (msg, changed)."""
    uri = "{0}('{1}')".format(ACCOUNT_URI, user_id)
    resp = rest_obj.invoke_request('DELETE', uri)
    # 204 No Content is the success status for the DELETE call.
    if resp.status_code == 204:
        return "Successfully deleted the active directory user group.", True
    return "Invalid active directory user group name provided.", False
+
+
def main():
    """Module entry point: create/modify (present) or delete (absent) an AD user group."""
    specs = {
        "state": {"required": False, "type": 'str', "default": "present",
                  "choices": ['present', 'absent']},
        "group_name": {"required": True, "type": 'str'},
        "role": {"required": False, "type": 'str'},
        "directory_name": {"required": False, "type": 'str'},
        "directory_id": {"required": False, "type": 'int'},
        "domain_username": {"required": False, "type": 'str'},
        "domain_password": {"required": False, "type": 'str', "no_log": True},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        mutually_exclusive=[['directory_name', 'directory_id'], ],
        supports_check_mode=True)
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            if module.params["state"] == "present":
                resp, msg = directory_user(module, rest_obj)
                # The import action returns a list with a single account entry.
                if isinstance(resp, list):
                    resp = resp[0]
                module.exit_json(
                    msg="Successfully {0} the active directory user group.".format(msg),
                    domain_user_status=resp, changed=True
                )
            if module.params["state"] == "absent":
                user = get_directory_user(module, rest_obj)
                msg, changed = delete_directory_user(rest_obj, int(user["Id"]))
                user = rest_obj.strip_substr_dict(user)
                module.exit_json(msg=msg, changed=changed, domain_user_status=user)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # A network-level failure is reported as 'unreachable' rather than failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py
new file mode 100644
index 000000000..a3bfff955
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py
@@ -0,0 +1,653 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_firmware
+short_description: Update firmware on PowerEdge devices and its components through OpenManage Enterprise
+version_added: "2.0.0"
+description: This module updates the firmware of PowerEdge devices and all its components through OpenManage Enterprise.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ device_service_tag:
+ description:
+ - List of service tags of the targeted devices.
+ - Either I(device_id) or I(device_service_tag) can be used individually or together.
+ - This option is mutually exclusive with I(device_group_names) and I(devices).
+ type: list
+ elements: str
+ device_id:
+ description:
+ - List of ids of the targeted device.
+ - Either I(device_id) or I(device_service_tag) can be used individually or together.
+ - This option is mutually exclusive with I(device_group_names) and I(devices).
+ type: list
+ elements: int
+ device_group_names:
+ description:
+ - Enter the name of the device group that contains the devices on which firmware needs to be updated.
+ - This option is mutually exclusive with I(device_id) and I(device_service_tag).
+ type: list
+ elements: str
+ dup_file:
+ description:
+ - "The path of the Dell Update Package (DUP) file that contains the firmware or drivers required to update the
+ target system device or individual device components."
+ - This is mutually exclusive with I(baseline_name), I(components), and I(devices).
+ type: path
+ baseline_name:
+ description:
+      - Enter the baseline name to update the firmware of all devices or list of devices that are not compliant.
+ - This option is mutually exclusive with I(dup_file) and I(device_group_names).
+ type: str
+ components:
+ description:
+ - List of components to be updated.
+ - If not provided, all components applicable are considered.
+ - This option is case sensitive.
+ - This is applicable to I(device_service_tag), I(device_id), and I(baseline_name).
+ type: list
+ elements: str
+ devices:
+ description:
+ - This option allows to select components on each device for firmware update.
+ - This option is mutually exclusive with I(dup_file), I(device_group_names), I(device_id), and I(device_service_tag).
+ type: list
+ elements: dict
+ suboptions:
+ id:
+ type: int
+ description:
+ - The id of the target device to be updated.
+ - This option is mutually exclusive with I(service_tag).
+ service_tag:
+ type: str
+ description:
+ - The service tag of the target device to be updated.
+ - This option is mutually exclusive with I(id).
+ components:
+ description: The target components to be updated. If not specified, all applicable device components are considered.
+ type: list
+ elements: str
+ schedule:
+ type: str
+ description:
+ - Select the schedule for the firmware update.
+      - If C(StageForNextReboot) is chosen, the firmware is staged and applied during the next reboot
+        of the target device.
+      - If C(RebootNow) is chosen, the firmware updates are applied immediately.
+ choices:
+ - RebootNow
+ - StageForNextReboot
+ default: RebootNow
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+ - "Jagadeesh N V (@jagadeeshnv)"
+notes:
+ - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Update firmware from DUP file using device ids
+ dellemc.openmanage.ome_firmware:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id:
+ - 11111
+ - 22222
+ dup_file: "/path/Chassis-System-Management_Firmware_6N9WN_WN64_1.00.01_A00.EXE"
+
+- name: Update firmware from a DUP file using device service tags
+ dellemc.openmanage.ome_firmware:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag:
+ - KLBR111
+ - KLBR222
+ dup_file: "/path/Network_Firmware_NTRW0_WN64_14.07.07_A00-00_01.EXE"
+
+- name: Update firmware from a DUP file using device group names
+ dellemc.openmanage.ome_firmware:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_group_names:
+ - servers
+ dup_file: "/path/BIOS_87V69_WN64_2.4.7.EXE"
+
+- name: Update firmware using baseline name
+ dellemc.openmanage.ome_firmware:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+
+- name: Stage firmware for the next reboot using baseline name
+ dellemc.openmanage.ome_firmware:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ schedule: StageForNextReboot
+
+- name: "Update firmware using baseline name and components."
+ dellemc.openmanage.ome_firmware:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ components:
+ - BIOS
+
+- name: Update firmware of device components from a DUP file using a device ids in a baseline
+ dellemc.openmanage.ome_firmware:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ device_id:
+ - 11111
+ - 22222
+ components:
+ - iDRAC with Lifecycle Controller
+
+- name: Update firmware of device components from a baseline using a device service tags under a baseline
+ dellemc.openmanage.ome_firmware:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ device_service_tag:
+ - KLBR111
+ - KLBR222
+ components:
+ - IOM-SAS
+
+- name: Update firmware using baseline name with a device id and required components
+ dellemc.openmanage.ome_firmware:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ devices:
+ - id: 12345
+ components:
+ - Lifecycle Controller
+ - id: 12346
+ components:
+ - Enterprise UEFI Diagnostics
+ - BIOS
+
+- name: "Update firmware using baseline name with a device service tag and required components."
+ dellemc.openmanage.ome_firmware:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ devices:
+ - service_tag: ABCDE12
+ components:
+ - PERC H740P Adapter
+ - BIOS
+ - service_tag: GHIJK34
+ components:
+ - OS Drivers Pack
+
+- name: "Update firmware using baseline name with a device service tag or device id and required components."
+ dellemc.openmanage.ome_firmware:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ devices:
+ - service_tag: ABCDE12
+ components:
+ - BOSS-S1 Adapter
+ - PowerEdge Server BIOS
+ - id: 12345
+ components:
+ - iDRAC with Lifecycle Controller
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: "Overall firmware update status."
+ returned: always
+ sample: Successfully submitted the firmware update job.
+update_status:
+ type: dict
+ description: The firmware update job and progress details from the OME.
+ returned: success
+ sample: {
+ 'LastRun': None,
+ 'CreatedBy': 'user',
+ 'Schedule': 'startnow',
+ 'LastRunStatus': {
+ 'Id': 1111,
+ 'Name': 'NotRun'
+ },
+ 'Builtin': False,
+ 'Editable': True,
+ 'NextRun': None,
+ 'JobStatus': {
+ 'Id': 1111,
+ 'Name': 'New'
+ },
+ 'JobName': 'Firmware Update Task',
+ 'Visible': True,
+ 'State': 'Enabled',
+ 'JobDescription': 'dup test',
+ 'Params': [{
+ 'Value': 'true',
+ 'Key': 'signVerify',
+ 'JobId': 11111}, {
+ 'Value': 'false',
+ 'Key': 'stagingValue',
+ 'JobId': 11112}, {
+ 'Value': 'false',
+ 'Key': 'complianceUpdate',
+ 'JobId': 11113}, {
+ 'Value': 'INSTALL_FIRMWARE',
+ 'Key': 'operationName',
+ 'JobId': 11114}],
+ 'Targets': [{
+ 'TargetType': {
+ 'Id': 1000,
+ 'Name': 'DEVICE'},
+ 'Data': 'DCIM:INSTALLED#701__NIC.Mezzanine.1A-1-1=1234567654321',
+ 'Id': 11115,
+ 'JobId': 11116}],
+ 'StartTime': None,
+ 'UpdatedBy': None,
+ 'EndTime': None,
+ 'Id': 11117,
+ 'JobType': {
+ 'Internal': False,
+ 'Id': 5,
+ 'Name': 'Update_Task'}
+}
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+
+
+COMPLIANCE_URI = "UpdateService/Baselines({0})/DeviceComplianceReports"
+BASELINE_URI = "UpdateService/Baselines"
+FW_JOB_DESC = "Firmware update task initiated from OpenManage Ansible Module collections"
+NO_CHANGES_MSG = "No changes found to be applied. Either there are no updates present or components specified are not" \
+ " found in the baseline."
+COMPLIANCE_READ_FAIL = "Failed to read compliance report."
+DUP_REQ_MSG = "Parameter 'dup_file' to be provided along with 'device_id'|'device_service_tag'|'device_group_names'"
+APPLICABLE_DUP = "Unable to get applicable components DUP."
+CHANGES_FOUND = "Changes found to be applied."
+
+
def spawn_update_job(rest_obj, job_payload):
    """Create the firmware-update job on OME and return its details.

    Returns an empty dict when the job was not created (non-201 response).
    """
    response = rest_obj.invoke_request("POST", "JobService/Jobs", data=job_payload)
    # 201 Created indicates the job was accepted by the job service.
    return response.json_data if response.status_code == 201 else {}
+
+
def job_payload_for_update(rest_obj, module, target_data, baseline=None):
    """Build the JobService payload that starts a firmware update job."""
    job_type_id = rest_obj.get_job_type_id("Update_Task")
    if job_type_id is None:
        module.fail_json(msg="Unable to fetch the job type Id.")
    schedule = module.params["schedule"]
    # Staging defers the actual flash until the device's next reboot.
    staging = 'true' if schedule == "StageForNextReboot" else 'false'
    params = [{"Key": "operationName", "Value": "INSTALL_FIRMWARE"},
              {"Key": "stagingValue", "Value": staging},
              {"Key": "signVerify", "Value": "true"}]
    if schedule == "RebootNow":
        # A reboot type is meaningful only when not staging.
        # reboot_dict = {"GracefulReboot": "2", "GracefulRebootForce": "3", "PowerCycle": "1"}
        params.append({"Key": "rebootType", "Value": "3"})
    if baseline is not None:
        params.append({"Key": "complianceReportId", "Value": "{0}".format(baseline["baseline_id"])})
        params.append({"Key": "repositoryId", "Value": "{0}".format(baseline["repo_id"])})
        params.append({"Key": "catalogId", "Value": "{0}".format(baseline["catalog_id"])})
        params.append({"Key": "complianceUpdate", "Value": "true"})
    else:
        params.append({"JobId": 0, "Key": "complianceUpdate", "Value": "false"})
    return {
        "Id": 0, "JobName": "Firmware Update Task",
        "JobDescription": FW_JOB_DESC, "Schedule": "startnow",
        "State": "Enabled", "JobType": {"Id": job_type_id, "Name": "Update_Task"},
        "Targets": target_data,
        "Params": params
    }
+
+
def get_applicable_components(rest_obj, dup_payload, module):
    """Get the target array to be used in spawning jobs for update.

    Posts the DUP applicability payload and converts each reported device and
    component into a job 'Targets' entry. Fails the module when the report
    request does not return HTTP 200.
    """
    target_data = []
    dup_url = "UpdateService/Actions/UpdateService.GetSingleDupReport"
    headers = {"Content-Type": "application/json", "Accept": "application/json"}
    dup_resp = rest_obj.invoke_request("POST", dup_url, data=dup_payload,
                                       headers=headers, api_timeout=60)
    if dup_resp.status_code == 200:
        dup_data = dup_resp.json_data
        file_token = str(dup_payload['SingleUpdateReportFileToken'])
        for device in dup_data:
            for component in device['DeviceReport']['Components']:
                temp_map = {}
                temp_map['Id'] = device['DeviceId']
                # Target data format: "<component source name>=<file token>".
                temp_map['Data'] = "{0}={1}".format(component['ComponentSourceName'], file_token)
                temp_map['TargetType'] = {}
                temp_map['TargetType']['Id'] = int(device['DeviceReport']['DeviceTypeId'])
                temp_map['TargetType']['Name'] = str(device['DeviceReport']['DeviceTypeName'])
                target_data.append(temp_map)
    else:
        module.fail_json(msg=APPLICABLE_DUP)
    return target_data
+
+
def get_dup_applicability_payload(file_token, device_ids=None, group_ids=None, baseline_ids=None):
    """Build the GetSingleDupReport payload for the given targets.

    Exactly one of device_ids / group_ids / baseline_ids is used, in that
    order of precedence; IDs are normalised to integers.
    """
    payload = {'SingleUpdateReportBaseline': [],
               'SingleUpdateReportGroup': [],
               'SingleUpdateReportTargets': [],
               'SingleUpdateReportFileToken': file_token}
    if device_ids is not None:
        payload['SingleUpdateReportTargets'] = [int(ident) for ident in device_ids]
    elif group_ids is not None:
        payload['SingleUpdateReportGroup'] = [int(ident) for ident in group_ids]
    elif baseline_ids is not None:
        payload['SingleUpdateReportBaseline'] = [int(ident) for ident in baseline_ids]
    return payload
+
+
def upload_dup_file(rest_obj, module):
    """Upload the DUP file to OME and return (success, file_token)."""
    upload_uri = "UpdateService/Actions/UpdateService.UploadFile"
    headers = {"Content-Type": "application/octet-stream", "Accept": "application/octet-stream"}
    dup_file = module.params['dup_file']
    with open(dup_file, 'rb') as file_handle:
        contents = file_handle.read()
        response = rest_obj.invoke_request("POST", upload_uri, data=contents, headers=headers,
                                           api_timeout=100, dump=False)
    # On success the response body is the file token used by the applicability report.
    if response.status_code != 200:
        module.fail_json(msg="Unable to upload {0} to {1}".format(dup_file, module.params['hostname']))
    return True, str(response.json_data)
+
+
def get_device_ids(rest_obj, module, device_id_tags):
    """Resolve a mixed list of device IDs/service tags to device IDs.

    Returns (device_ids, id_to_tag_map) where device_ids is a list of string
    IDs in input order, and id_to_tag_map maps every inventoried device ID
    (string) to its service tag. Fails the module when any entry cannot be
    resolved or when the inventory is empty.
    """
    device_id = []
    resp = rest_obj.get_all_report_details("DeviceService/Devices")
    if resp.get("report_list"):
        device_resp = dict([(str(device['Id']), device['DeviceServiceTag']) for device in resp["report_list"]])
        # Reverse map gives O(1) service-tag lookup instead of a list.index()
        # scan per tag; setdefault keeps the first ID on duplicate tags, which
        # matches the previous .index() behavior.
        tag_to_id = {}
        for dev_id, svc_tag in device_resp.items():
            tag_to_id.setdefault(svc_tag, dev_id)
        invalid_tags = []
        for tag in map(str, device_id_tags):
            if tag in device_resp:
                device_id.append(tag)
            elif tag in tag_to_id:
                device_id.append(tag_to_id[tag])
            else:
                invalid_tags.append(tag)
        if invalid_tags:
            module.fail_json(
                msg="Unable to complete the operation because the entered target device service"
                    " tag(s) or device id(s) '{0}' are invalid.".format(",".join(set(invalid_tags))))
    else:
        module.fail_json(msg="Failed to fetch the device facts.")
    return device_id, device_resp
+
+
def get_group_ids(rest_obj, module):
    """Resolve the configured device group names to group IDs.

    Fails the module when any group name is unknown, including when the
    appliance returns no groups at all.
    """
    resp = rest_obj.get_all_report_details("GroupService/Groups")
    group_name = module.params.get('device_group_names')
    # BUGFIX: grp_ids was previously unbound (NameError) when the appliance
    # returned an empty group list; initialise it and validate unconditionally
    # so an empty inventory produces the proper failure message instead.
    grp_ids = []
    if resp["report_list"]:
        grp_ids = [grp['Id'] for grp in resp["report_list"] for grpname in group_name if grp['Name'] == grpname]
    if len(set(group_name)) != len(set(grp_ids)):
        module.fail_json(
            msg="Unable to complete the operation because the entered target device group name(s)"
                " '{0}' are invalid.".format(",".join(set(group_name))))
    return grp_ids
+
+
def get_baseline_ids(rest_obj, module):
    """Look up the baseline by name and return its baseline/repository/catalog IDs."""
    resp = rest_obj.get_all_report_details(BASELINE_URI)
    baseline = module.params.get('baseline_name')
    baseline_details = {}
    report_list = resp["report_list"]
    if not report_list:
        module.fail_json(msg="Unable to complete the operation because the entered "
                             "target baseline name does not exist.")
    for entry in report_list:
        # No break: the last matching baseline wins, as before.
        if entry['Name'] == baseline:
            baseline_details = {"baseline_id": entry["Id"],
                                "repo_id": entry["RepositoryId"],
                                "catalog_id": entry["CatalogId"]}
    if not baseline_details:
        module.fail_json(
            msg="Unable to complete the operation because the entered target baseline name"
                " '{0}' is invalid.".format(baseline))
    return baseline_details
+
+
def single_dup_update(rest_obj, module):
    """Upload a DUP file and build the job target data for its applicable components.

    Targets come either from device groups or from device IDs/service tags.
    Exits with CHANGES_FOUND in check mode before uploading anything.
    """
    target_data, device_ids, group_ids, baseline_ids = None, None, None, None
    if module.params.get("device_group_names") is not None:
        group_ids = get_group_ids(rest_obj, module)
    else:
        device_id_tags = _validate_device_attributes(module)
        device_ids, id_tag_map = get_device_ids(rest_obj, module, device_id_tags)
    if module.check_mode:
        module.exit_json(msg=CHANGES_FOUND)
    upload_status, token = upload_dup_file(rest_obj, module)
    if upload_status:
        report_payload = get_dup_applicability_payload(token, device_ids=device_ids, group_ids=group_ids,
                                                       baseline_ids=baseline_ids)
        if report_payload:
            target_data = get_applicable_components(rest_obj, report_payload, module)
    return target_data
+
+
def baseline_based_update(rest_obj, module, baseline, dev_comp_map):
    """Build the job target data from a baseline's device compliance report.

    Only components whose UpdateAction is UPGRADE or DOWNGRADE are included;
    an optional per-device component filter (dev_comp_map, keyed by string
    device ID) narrows this further. Exits with NO_CHANGES_MSG when nothing
    needs updating, or with CHANGES_FOUND in check mode.
    """
    compliance_uri = COMPLIANCE_URI.format(baseline["baseline_id"])
    resp = rest_obj.get_all_report_details(compliance_uri)
    compliance_report_list = []
    update_actions = ["UPGRADE", "DOWNGRADE"]
    if resp["report_list"]:
        comps = []
        if not dev_comp_map:
            # No explicit devices given: apply the global component filter to
            # every device in the baseline.
            comps = module.params.get('components')
            dev_comp_map = dict([(str(dev["DeviceId"]), comps) for dev in resp["report_list"]])
        for dvc in resp["report_list"]:
            dev_id = dvc["DeviceId"]
            if str(dev_id) in dev_comp_map:
                comps = dev_comp_map.get(str(dev_id), [])
                compliance_report = dvc.get("ComponentComplianceReports")
                if compliance_report is not None:
                    data_dict = {}
                    comp_list = []
                    if not comps:
                        # No filter for this device: take every updatable component.
                        comp_list = list(icomp["SourceName"] for icomp in compliance_report
                                         if icomp["UpdateAction"] in update_actions)
                    else:
                        # Filter by component display name (case-sensitive match).
                        comp_list = list(icomp["SourceName"] for icomp in compliance_report
                                         if ((icomp["UpdateAction"] in update_actions) and
                                             (icomp.get('Name') in comps)))  # regex filtering ++
                    if comp_list:
                        data_dict["Id"] = dev_id
                        # Multiple component source names are joined with ';'.
                        data_dict["Data"] = str(";").join(comp_list)
                        data_dict["TargetType"] = {"Id": dvc['DeviceTypeId'], "Name": dvc["DeviceTypeName"]}
                        compliance_report_list.append(data_dict)
    else:
        module.fail_json(msg=COMPLIANCE_READ_FAIL)
    if not compliance_report_list:
        module.exit_json(msg=NO_CHANGES_MSG)
    if module.check_mode:
        module.exit_json(msg=CHANGES_FOUND)
    return compliance_report_list
+
+
+def _validate_device_attributes(module):
+ device_id_tags = []
+ service_tag = module.params.get('device_service_tag')
+ device_id = module.params.get('device_id')
+ devices = module.params.get('devices')
+ if devices:
+ for dev in devices:
+ if dev.get('id'):
+ device_id_tags.append(dev.get('id'))
+ else:
+ device_id_tags.append(dev.get('service_tag'))
+ if device_id is not None:
+ device_id_tags.extend(device_id)
+ if service_tag is not None:
+ device_id_tags.extend(service_tag)
+ return device_id_tags
+
+
def get_device_component_map(rest_obj, module):
    """Map each targeted device ID (string) to the list of components to update.

    Device-level component lists from the 'devices' option override the
    module-wide 'components' list.
    """
    device_id_tags = _validate_device_attributes(module)
    device_ids, id_tag_map = get_device_ids(rest_obj, module, device_id_tags)
    comps = module.params.get('components')
    dev_comp_map = {}
    if device_ids:
        dev_comp_map = dict([(dev, comps) for dev in device_ids])
    devices = module.params.get('devices')
    if devices:
        # Reverse map gives O(1) service-tag lookup and avoids shadowing the
        # builtin `id` as the previous list(...).index() scan did; setdefault
        # keeps the first ID on duplicate tags, matching the old behavior.
        tag_to_id = {}
        for known_id, svc_tag in id_tag_map.items():
            tag_to_id.setdefault(svc_tag, known_id)
        for dev in devices:
            if dev.get('id'):
                dev_comp_map[str(dev.get('id'))] = dev.get('components')
            else:
                dev_comp_map[str(tag_to_id[dev.get('service_tag')])] = dev.get('components')
    return dev_comp_map
+
+
def validate_inputs(module):
    """Fail early when dup_file is given without any target selector."""
    params = module.params
    if not params.get("dup_file"):
        return
    targets = (params.get("device_id"), params.get("device_service_tag"),
               params.get("device_group_names"))
    # A DUP update needs at least one way to pick its target devices.
    if not any(targets):
        module.fail_json(msg=DUP_REQ_MSG)
+
+
def main():
    """Module entry point: validate options and submit the firmware update job."""
    specs = {
        "device_service_tag": {"type": "list", "elements": 'str'},
        "device_id": {"type": "list", "elements": 'int'},
        "dup_file": {"type": "path"},
        "device_group_names": {"type": "list", "elements": 'str'},
        "components": {"type": "list", "elements": 'str', "default": []},
        "baseline_name": {"type": "str"},
        "schedule": {"type": 'str', "choices": ['RebootNow', 'StageForNextReboot'], "default": 'RebootNow'},
        "devices": {
            "type": 'list', "elements": 'dict',
            "options": {
                "id": {'type': 'int'},
                "service_tag": {"type": 'str'},
                "components": {"type": "list", "elements": 'str', "default": []},
            },
            "mutually_exclusive": [('id', 'service_tag')],
            "required_one_of": [('id', 'service_tag')]
        },
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_one_of=[["dup_file", "baseline_name"]],
        mutually_exclusive=[
            ["baseline_name", "dup_file"],
            ["device_group_names", "device_id", "devices"],
            ["device_group_names", "device_service_tag", "devices"],
            ["baseline_name", "device_group_names"],
            ["dup_file", "components", "devices"]],
        supports_check_mode=True
    )
    validate_inputs(module)
    update_status, baseline_details = {}, None
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            if module.params.get("baseline_name"):
                # Baseline flow: update from the baseline's compliance report.
                baseline_details = get_baseline_ids(rest_obj, module)
                device_comp_map = get_device_component_map(rest_obj, module)
                target_data = baseline_based_update(rest_obj, module, baseline_details, device_comp_map)
            else:
                # DUP flow: upload the package and compute applicable targets.
                target_data = single_dup_update(rest_obj, module)
            job_payload = job_payload_for_update(rest_obj, module, target_data, baseline=baseline_details)
            update_status = spawn_update_job(rest_obj, job_payload)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # A network-level failure is reported as 'unreachable' rather than failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, OSError) as err:
        module.fail_json(msg=str(err))
    module.exit_json(msg="Successfully submitted the firmware update job.", update_status=update_status, changed=True)


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py
new file mode 100644
index 000000000..d6282db3a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py
@@ -0,0 +1,550 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_firmware_baseline
+short_description: Create, modify, or delete a firmware baseline on OpenManage Enterprise or OpenManage Enterprise Modular
+description: This module allows to create, modify, or delete a firmware baseline on OpenManage Enterprise or OpenManage Enterprise Modular.
+version_added: "2.0.0"
+author:
+ - Jagadeesh N V(@jagadeeshnv)
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ state:
+ description:
+ - C(present) creates or modifies a baseline.
+ - C(absent) deletes an existing baseline.
+ choices:
+ - present
+ - absent
+ default: present
+ type: str
+ version_added: 3.4.0
+ baseline_name:
+ type: str
+ description:
+      - Name of the baseline.
+ - This option is mutually exclusive with I(baseline_id).
+ baseline_id:
+ type: int
+ description:
+ - ID of the existing baseline.
+ - This option is mutually exclusive with I(baseline_name).
+ version_added: 3.4.0
+ new_baseline_name:
+ description: New name of the baseline.
+ type: str
+ version_added: 3.4.0
+ baseline_description:
+ type: str
+ description:
+ - Description for the baseline being created.
+ catalog_name:
+ type: str
+ description:
+ - Name of the catalog to be associated with the baseline.
+ downgrade_enabled:
+ type: bool
+ description:
+ - Indicates whether firmware downgrade is allowed for the devices in the baseline.
+ - This value will be set to C(True) by default, if not provided during baseline creation.
+ is_64_bit:
+ type: bool
+ description:
+ - Indicates if the repository contains 64-bit DUPs.
+ - This value will be set to C(True) by default, if not provided during baseline creation.
+ device_ids:
+ type: list
+ elements: int
+ description:
+ - List of device IDs.
+ - This option is mutually exclusive with I(device_service_tags) and I(device_group_names).
+ device_service_tags:
+ type: list
+ elements: str
+ description:
+ - List of device service tags.
+ - This option is mutually exclusive with I(device_ids) and I(device_group_names).
+ device_group_names:
+ type: list
+ elements: str
+ description:
+ - List of group names.
+ - This option is mutually exclusive with I(device_ids) and I(device_service_tags).
+ job_wait:
+ description:
+ - Provides the option to wait for job completion.
+ - This option is applicable when I(state) is C(present).
+ type: bool
+ default: true
+ version_added: 3.4.0
+ job_wait_timeout:
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(True).
+ type: int
+ default: 600
+ version_added: 3.4.0
+requirements:
+ - "python >= 3.8.6"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular.
+ - I(device_group_names) option is not applicable for OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Create baseline for device IDs
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+ baseline_description: "baseline_description"
+ catalog_name: "catalog_name"
+ device_ids:
+ - 1010
+ - 2020
+
+- name: Create baseline for servicetags
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+ baseline_description: "baseline_description"
+ catalog_name: "catalog_name"
+ device_service_tags:
+ - "SVCTAG1"
+ - "SVCTAG2"
+
+- name: Create baseline for device groups without job tracking
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+ baseline_description: "baseline_description"
+ catalog_name: "catalog_name"
+ device_group_names:
+ - "Group1"
+ - "Group2"
+ job_wait: no
+
+- name: Modify an existing baseline
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "existing_baseline_name"
+ new_baseline_name: "new_baseline_name"
+ baseline_description: "new baseline_description"
+ catalog_name: "catalog_other"
+ device_group_names:
+ - "Group3"
+ - "Group4"
+ - "Group5"
+ downgrade_enabled: no
+ is_64_bit: yes
+
+- name: Delete a baseline
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: absent
+ baseline_name: "baseline_name"
+'''
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of the firmware baseline operation.
+ returned: always
+ type: str
+ sample: "Successfully created the firmware baseline."
+baseline_status:
+ description: Details of the baseline status.
+ returned: success
+ type: dict
+ sample: {
+ "CatalogId": 123,
+ "Description": "BASELINE DESCRIPTION",
+ "DeviceComplianceReports": [],
+ "DowngradeEnabled": true,
+ "Id": 23,
+ "Is64Bit": true,
+ "Name": "my_baseline",
+ "RepositoryId": 123,
+ "RepositoryName": "catalog123",
+ "RepositoryType": "HTTP",
+ "Targets": [
+ {
+ "Id": 10083,
+ "Type": {
+ "Id": 1000,
+ "Name": "DEVICE"
+ }
+ },
+ {
+ "Id": 10076,
+ "Type": {
+ "Id": 1000,
+ "Name": "DEVICE"
+ }
+ }
+ ],
+ "TaskId": 11235,
+ "TaskStatusId": 2060
+ }
+job_id:
+ description: Job ID of the baseline task.
+ returned: When baseline job is in running state
+ type: int
+ sample: 10123
+baseline_id:
+ description: ID of the deleted baseline.
+ returned: When I(state) is C(absent)
+ type: int
+ sample: 10123
+error_info:
+ type: dict
+ description: Details of http error.
+ returned: on http error
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to retrieve baseline list either because the device ID(s) entered are invalid",
+ "Resolution": "Make sure the entered device ID(s) are valid and retry the operation.",
+ "Severity": "Critical"
+ }
+ ],
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information."
+ }
+ }
+'''
+
+BASELINE_URI = "UpdateService/Baselines"
+ID_BASELINE_URI = "UpdateService/Baselines({Id})"
+DELETE_BASELINE_URI = "UpdateService/Actions/UpdateService.RemoveBaselines"
+CATALOG_URI = "UpdateService/Catalogs"
+BASELINE_JOB_RUNNING = "Firmware baseline '{name}' with ID {id} is running. Please retry after job completion."
+BASELINE_DEL_SUCCESS = "Successfully deleted the firmware baseline."
+NO_CHANGES_MSG = "No changes found to be applied."
+CHANGES_FOUND = "Changes found to be applied."
+INVALID_BASELINE_ID = "Invalid baseline ID provided."
+BASELINE_TRIGGERED = "Successfully triggered the firmware baseline task."
+NO_CATALOG_MESSAGE = "Catalog name not provided for baseline creation."
+NO_TARGETS_MESSAGE = "Targets not specified for baseline creation."
+CATALOG_STATUS_MESSAGE = "Unable to create the firmware baseline as the catalog is in {status} status."
+BASELINE_UPDATED = "Successfully {op} the firmware baseline."
+SETTLING_TIME = 3
+JOB_POLL_INTERVAL = 10
+GROUP_ID = 6000
+
+
+import json
+import time
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.common.dict_transformations import recursive_diff
+
+
+def get_baseline_from_name(rest_obj, baseline):
+ resp = rest_obj.get_all_items_with_pagination(BASELINE_URI)
+ baselines_list = resp.get("value")
+ bsln = baseline
+ for d in baselines_list:
+ if d['Name'] == baseline.get('Name'):
+ bsln = d
+ break
+ nlist = list(bsln)
+ for k in nlist:
+ if str(k).lower().startswith('@odata'):
+ bsln.pop(k)
+ return bsln
+
+
+def check_existing_baseline(module, rest_obj):
+ baseline_id = module.params.get("baseline_id")
+ srch_key = "Name"
+ srch_val = module.params.get("baseline_name")
+ if baseline_id:
+ srch_key = "Id"
+ srch_val = module.params.get("baseline_id")
+ baseline_cfgs = []
+ resp = rest_obj.get_all_items_with_pagination(BASELINE_URI)
+ baselines = resp.get("value")
+ for d in baselines:
+ if d[srch_key] == srch_val:
+ baseline_cfgs.append(d)
+ if baseline_id:
+ break
+ return baseline_cfgs
+
+
+def get_catrepo_ids(module, cat_name, rest_obj):
+ if cat_name is not None:
+ resp_data = rest_obj.get_all_items_with_pagination(CATALOG_URI)
+ values = resp_data["value"]
+ if values:
+ for catalog in values:
+ repo = catalog.get("Repository")
+ if repo.get("Name") == cat_name:
+ if catalog.get('Status') != 'Completed':
+ module.fail_json(msg=CATALOG_STATUS_MESSAGE.format(status=catalog.get('Status')))
+ return catalog.get("Id"), repo.get("Id")
+ return None, None
+
+
+def get_dev_ids(module, rest_obj, param, devkey):
+ paramlist = module.params[param]
+ resp_data = rest_obj.get_all_items_with_pagination("DeviceService/Devices")
+ values = resp_data["value"]
+ targets = []
+ if values:
+ devlist = values
+ device_resp = dict([(device[devkey], device) for device in devlist])
+ for st in paramlist:
+ if st in device_resp:
+ djson = device_resp[st]
+ target = {}
+ device_type = {}
+ device_type['Id'] = djson['Type']
+ device_type['Name'] = "DEVICE"
+ target['Id'] = djson['Id']
+ target['Type'] = device_type
+ targets.append(target)
+ else:
+ module.fail_json(msg="Unable to complete the operation because the entered target"
+ " {0} '{1}' is invalid.".format(devkey, st))
+ return targets
+
+
+def get_group_ids(module, rest_obj):
+ grp_name_list = module.params.get("device_group_names")
+ resp_data = rest_obj.get_all_items_with_pagination("GroupService/Groups")
+ values = resp_data["value"]
+ targets = []
+ if values:
+ grplist = values
+ device_resp = dict([(str(grp['Name']), grp) for grp in grplist])
+ for st in grp_name_list:
+ if st in device_resp:
+ djson = device_resp[st]
+ target = {}
+ device_type = {}
+ device_type['Id'] = GROUP_ID
+ device_type['Name'] = "GROUP"
+ target['Id'] = djson['Id']
+ target['Type'] = device_type
+ targets.append(target)
+ else:
+ module.fail_json(msg="Unable to complete the operation because the entered target"
+ " Group Name '{0}' is invalid.".format(st))
+ return targets
+
+
+def get_target_list(module, rest_obj):
+ target_list = None
+ if module.params.get("device_service_tags"):
+ target_list = get_dev_ids(module, rest_obj, "device_service_tags", "DeviceServiceTag")
+ elif module.params.get("device_group_names"):
+ target_list = get_group_ids(module, rest_obj)
+ elif module.params.get("device_ids"):
+ target_list = get_dev_ids(module, rest_obj, "device_ids", "Id")
+ return target_list
+
+
+def exit_baseline(module, rest_obj, baseline, op):
+ msg = BASELINE_TRIGGERED
+ time.sleep(SETTLING_TIME)
+ try:
+ bsln = get_baseline_from_name(rest_obj, baseline)
+ except Exception:
+ bsln = baseline
+ if module.params.get("job_wait"):
+ job_failed, job_message = rest_obj.job_tracking(
+ baseline.get('TaskId'), job_wait_sec=module.params["job_wait_timeout"], sleep_time=JOB_POLL_INTERVAL)
+ if job_failed is True:
+ module.fail_json(msg=job_message, baseline_status=bsln)
+ msg = BASELINE_UPDATED.format(op=op)
+ module.exit_json(msg=msg, baseline_status=bsln, changed=True)
+
+
+def _get_baseline_payload(module, rest_obj):
+ cat_name = module.params.get("catalog_name")
+ cat_id, repo_id = get_catrepo_ids(module, cat_name, rest_obj)
+ if cat_id is None or repo_id is None:
+ module.fail_json(msg="No Catalog with name {0} found".format(cat_name))
+ targets = get_target_list(module, rest_obj)
+ if targets is None:
+ module.fail_json(msg=NO_TARGETS_MESSAGE)
+ baseline_name = module.params.get("baseline_name")
+ baseline_payload = {
+ "Name": baseline_name,
+ "CatalogId": cat_id,
+ "RepositoryId": repo_id,
+ "Targets": targets
+ }
+ baseline_payload['Description'] = module.params.get("baseline_description")
+ de = module.params.get("downgrade_enabled")
+ baseline_payload['DowngradeEnabled'] = de if de is not None else True
+ sfb = module.params.get("is_64_bit")
+ baseline_payload['Is64Bit'] = sfb if sfb is not None else True
+ return baseline_payload
+
+
+def create_baseline(module, rest_obj):
+ myparams = module.params
+ if not any([myparams.get("device_ids"), myparams.get("device_service_tags"), myparams.get("device_group_names")]):
+ module.fail_json(msg=NO_TARGETS_MESSAGE)
+ if not myparams.get("catalog_name"):
+ module.fail_json(msg=NO_CATALOG_MESSAGE)
+ payload = _get_baseline_payload(module, rest_obj)
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ resp = rest_obj.invoke_request("POST", BASELINE_URI, data=payload)
+ exit_baseline(module, rest_obj, resp.json_data, 'created')
+
+
+def update_modify_payload(module, rest_obj, modify_payload, current_baseline):
+ paylist = ['Name', "CatalogId", "RepositoryId", 'Description', 'DowngradeEnabled', 'Is64Bit']
+ diff_tuple = recursive_diff(modify_payload, current_baseline)
+ diff = 0
+ payload = dict([(item, current_baseline.get(item)) for item in paylist])
+ if diff_tuple:
+ if diff_tuple[0]:
+ diff += 1
+ payload.update(diff_tuple[0])
+ payload['Targets'] = current_baseline.get('Targets', [])
+ inp_targets_list = get_target_list(module, rest_obj)
+ if inp_targets_list:
+ inp_target_dict = dict([(item['Id'], item['Type']['Id']) for item in inp_targets_list])
+ cur_target_dict = dict([(item['Id'], item['Type']['Id']) for item in current_baseline.get('Targets', [])])
+ diff_tuple = recursive_diff(inp_target_dict, cur_target_dict)
+ if diff_tuple:
+ diff += 1
+ payload['Targets'] = inp_targets_list
+ if diff == 0:
+ module.exit_json(msg=NO_CHANGES_MSG)
+ payload['Id'] = current_baseline['Id']
+ return payload
+
+
+def modify_baseline(module, rest_obj, baseline_list):
+ d = baseline_list[0]
+ if d["TaskStatusId"] == 2050:
+ module.fail_json(msg=BASELINE_JOB_RUNNING.format(name=d["Name"], id=d["Id"]), job_id=d['TaskId'])
+ mparam = module.params
+ current_baseline = baseline_list[0]
+ modify_payload = {}
+ if mparam.get('catalog_name'):
+ cat_id, repo_id = get_catrepo_ids(module, mparam.get('catalog_name'), rest_obj)
+ if cat_id is None or repo_id is None:
+ module.fail_json(msg="No Catalog with name {0} found".format(mparam.get('catalog_name')))
+ modify_payload["CatalogId"] = cat_id
+ modify_payload["RepositoryId"] = repo_id
+ if mparam.get('new_baseline_name'):
+ modify_payload['Name'] = mparam.get('new_baseline_name')
+ if mparam.get("baseline_description"):
+ modify_payload['Description'] = mparam.get("baseline_description")
+ if module.params.get("downgrade_enabled") is not None:
+ modify_payload['DowngradeEnabled'] = module.params.get("downgrade_enabled")
+ if module.params.get("is_64_bit") is not None:
+ modify_payload['Is64Bit'] = module.params.get("is_64_bit")
+ payload = update_modify_payload(module, rest_obj, modify_payload, current_baseline)
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ resp = rest_obj.invoke_request("PUT", ID_BASELINE_URI.format(Id=str(payload["Id"])), data=payload)
+ exit_baseline(module, rest_obj, resp.json_data, 'modified')
+
+
+def delete_baseline(module, rest_obj, baseline_list):
+ delete_ids = []
+ d = baseline_list[0]
+ if d["TaskStatusId"] == 2050:
+ module.fail_json(msg=BASELINE_JOB_RUNNING.format(name=d["Name"], id=d["Id"]), job_id=d['TaskId'])
+ delete_ids.append(d["Id"])
+ delete_payload = {"BaselineIds": delete_ids}
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ rest_obj.invoke_request('POST', DELETE_BASELINE_URI, data=delete_payload)
+ module.exit_json(msg=BASELINE_DEL_SUCCESS, changed=True, baseline_id=delete_ids[0])
+
+
+def main():
+ specs = {
+ "state": {"default": "present", "choices": ['present', 'absent']},
+ "baseline_name": {"type": 'str'},
+ "baseline_id": {"type": 'int'},
+ "baseline_description": {"type": 'str'},
+ "new_baseline_name": {"type": 'str'},
+ "catalog_name": {"type": 'str'},
+ "downgrade_enabled": {"type": 'bool'},
+ "is_64_bit": {"type": 'bool'},
+ "device_ids": {"type": 'list', "elements": 'int'},
+ "device_service_tags": {"type": 'list', "elements": 'str'},
+ "device_group_names": {"type": 'list', "elements": 'str'},
+ "job_wait": {"type": 'bool', "default": True},
+ "job_wait_timeout": {"type": 'int', "default": 600}
+ }
+ specs.update(ome_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ mutually_exclusive=[
+ ('device_ids', 'device_service_tags', 'device_group_names'),
+ ('baseline_name', 'baseline_id')
+ ],
+ required_one_of=[('baseline_name', 'baseline_id')],
+ supports_check_mode=True)
+
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ baseline_list = check_existing_baseline(module, rest_obj)
+ if module.params.get('state') == 'absent':
+ if baseline_list:
+ delete_baseline(module, rest_obj, baseline_list)
+ module.exit_json(msg=NO_CHANGES_MSG)
+ else:
+ if baseline_list:
+ modify_baseline(module, rest_obj, baseline_list)
+ else:
+ if module.params.get('baseline_id'):
+ module.fail_json(msg=INVALID_BASELINE_ID)
+ create_baseline(module, rest_obj)
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py
new file mode 100644
index 000000000..9e138a002
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py
@@ -0,0 +1,420 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.1.0
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_firmware_baseline_compliance_info
+short_description: Retrieves baseline compliance details on OpenManage Enterprise
+version_added: "2.0.0"
+description:
+ - This module allows to retrieve firmware compliance for a list of devices,
+ or against a specified baseline on OpenManage Enterprise.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ baseline_name:
+ description:
+ - Name of the baseline, for which the device compliance report is generated.
+ - This option is mandatory for generating baseline based device compliance report.
+ - I(baseline_name) is mutually exclusive with I(device_ids), I(device_service_tags) and I(device_group_names).
+ type: str
+ device_ids:
+ description:
+ - A list of unique identifier for device based compliance report.
+ - Either I(device_ids), I(device_service_tags) or I(device_group_names)
+ is required to generate device based compliance report.
+ - I(device_ids) is mutually exclusive with I(device_service_tags),
+ I(device_group_names) and I(baseline_name).
+ - Devices without reports are ignored.
+ type: list
+ elements: int
+ device_service_tags:
+ description:
+ - A list of service tags for device based compliance report.
+ - Either I(device_ids), I(device_service_tags) or I(device_group_names)
+ is required to generate device based compliance report.
+ - I(device_service_tags) is mutually exclusive with I(device_ids),
+ I(device_group_names) and I(baseline_name).
+ - Devices without reports are ignored.
+ type: list
+ elements: str
+ device_group_names:
+ description:
+ - A list of group names for device based compliance report.
+ - Either I(device_ids), I(device_service_tags) or I(device_group_names)
+ is required to generate device based compliance report.
+ - I(device_group_names) is mutually exclusive with I(device_ids),
+ I(device_service_tags) and I(baseline_name).
+ - Devices without reports are ignored.
+ type: list
+ elements: str
+requirements:
+ - "python >= 3.8.6"
+author: "Sajna Shetty(@Sajna-Shetty)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Retrieves device based compliance report for specified device IDs
+ dellemc.openmanage.ome_firmware_baseline_compliance_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_ids:
+ - 11111
+ - 22222
+
+- name: Retrieves device based compliance report for specified service Tags
+ dellemc.openmanage.ome_firmware_baseline_compliance_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tags:
+ - MXL1234
+ - MXL4567
+
+- name: Retrieves device based compliance report for specified group names
+ dellemc.openmanage.ome_firmware_baseline_compliance_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_group_names:
+ - "group1"
+ - "group2"
+
+- name: Retrieves device compliance report for a specified baseline
+ dellemc.openmanage.ome_firmware_baseline_compliance_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall baseline compliance report status.
+ returned: on error
+ sample: "Failed to fetch the compliance baseline information."
+baseline_compliance_info:
+ type: dict
+ description: Details of the baseline compliance report.
+ returned: success
+ sample: [
+ {
+ "CatalogId": 53,
+ "ComplianceSummary": {
+ "ComplianceStatus": "CRITICAL",
+ "NumberOfCritical": 2,
+ "NumberOfDowngrade": 0,
+ "NumberOfNormal": 0,
+ "NumberOfWarning": 0
+ },
+ "Description": "",
+ "DeviceComplianceReports": [
+ {
+ "ComplianceStatus": "CRITICAL",
+ "ComponentComplianceReports": [
+ {
+ "ComplianceDependencies": [],
+ "ComplianceStatus": "DOWNGRADE",
+ "Criticality": "Ok",
+ "CurrentVersion": "OSC_1.1",
+ "Id": 1258,
+ "ImpactAssessment": "",
+ "Name": "OS COLLECTOR 2.1",
+ "Path": "FOLDER04118304M/2/Diagnostics_Application_JCCH7_WN64_4.0_A00_01.EXE",
+ "PrerequisiteInfo": "",
+ "RebootRequired": false,
+ "SourceName": "DCIM:INSTALLED#802__OSCollector.Embedded.1",
+ "TargetIdentifier": "101734",
+ "UniqueIdentifier": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+ "UpdateAction": "DOWNGRADE",
+ "Uri": "http://www.dell.com/support/home/us/en/19/Drivers/DriversDetails?driverId=XXXXX",
+ "Version": "4.0"
+ },
+ {
+ "ComplianceDependencies": [],
+ "ComplianceStatus": "CRITICAL",
+ "Criticality": "Recommended",
+ "CurrentVersion": "DN02",
+ "Id": 1259,
+ "ImpactAssessment": "",
+ "Name": "TOSHIBA AL14SE 1.8 TB 2.5 12Gb 10K 512n SAS HDD Drive",
+ "Path": "FOLDER04086111M/1/SAS-Drive_Firmware_VDGFM_WN64_DN03_A00.EXE",
+ "PrerequisiteInfo": "",
+ "RebootRequired": true,
+ "SourceName": "DCIM:INSTALLED#304_C_Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1",
+ "TargetIdentifier": "103730",
+ "UniqueIdentifier": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+ "UpdateAction": "UPGRADE",
+ "Uri": "http://www.dell.com/support/home/us/en/19/Drivers/DriversDetails?driverId=XXXXX",
+ "Version": "DN03"
+ }
+ ],
+ "DeviceId": 11603,
+ "DeviceModel": "PowerEdge R630",
+ "DeviceName": null,
+ "DeviceTypeId": 1000,
+ "DeviceTypeName": "CPGCGS",
+ "FirmwareStatus": "Non-Compliant",
+ "Id": 194,
+ "RebootRequired": true,
+ "ServiceTag": "MXL1234"
+ }
+ ],
+ "DowngradeEnabled": true,
+ "Id": 53,
+ "Is64Bit": false,
+ "LastRun": "2019-09-27 05:08:16.301",
+ "Name": "baseline1",
+ "RepositoryId": 43,
+ "RepositoryName": "catalog2",
+ "RepositoryType": "CIFS",
+ "Targets": [
+ {
+ "Id": 11603,
+ "Type": {
+ "Id": 1000,
+ "Name": "DEVICE"
+ }
+ }
+ ],
+ "TaskId": 11710,
+ "TaskStatusId": 0
+ }
+ ]
+error_info:
+ type: dict
+ description: Details of http error.
+ returned: on http error
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to retrieve baseline list either because the device ID(s) entered are invalid",
+ "Resolution": "Make sure the entered device ID(s) are valid and retry the operation.",
+ "Severity": "Critical"
+ }
+ ],
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information."
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+
+base_line_path = "UpdateService/Baselines"
+baselines_report_by_device_ids_path = "UpdateService/Actions/UpdateService.GetBaselinesReportByDeviceids"
+device_is_list_path = "DeviceService/Devices"
+baselines_compliance_report_path = "UpdateService/Baselines({Id})/DeviceComplianceReports"
+group_service_path = "GroupService/Groups"
+EXIT_MESSAGE = "Unable to retrieve baseline list either because the device ID(s) entered are invalid, " \
+ "the ID(s) provided are not associated with a baseline or a group is used as a target for a baseline."
+MSG_ID = "CUPD3090"
+
+
+def _get_device_id_from_service_tags(service_tags, rest_obj, module):
+ """
+ Get device ids from device service tag
+ Returns :dict : device_id to service_tag map
+ :arg service_tags: service tag
+ :arg rest_obj: RestOME class object in case of request with session.
+ :returns: dict eg: {1345:"MXL1245"}
+ """
+ try:
+ resp = rest_obj.get_all_report_details("DeviceService/Devices")
+ devices_list = resp["report_list"]
+ if devices_list:
+ service_tag_dict = {}
+ for item in devices_list:
+ if item["DeviceServiceTag"] in service_tags:
+ service_tag_dict.update({item["Id"]: item["DeviceServiceTag"]})
+ return service_tag_dict
+ else:
+ module.exit_json(msg="Unable to fetch the device information.", baseline_compliance_info=[])
+ except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ raise err
+
+
+def get_device_ids_from_group_ids(module, grou_id_list, rest_obj):
+ try:
+ device_id_list = []
+ for group_id in grou_id_list:
+ group_id_path = group_service_path + "({group_id})/Devices".format(group_id=group_id)
+ resp_val = rest_obj.get_all_items_with_pagination(group_id_path)
+ grp_list_value = resp_val["value"]
+ if grp_list_value:
+ for device_item in grp_list_value:
+ device_id_list.append(device_item["Id"])
+ if len(device_id_list) == 0:
+ module.exit_json(msg="Unable to fetch the device ids from specified device_group_names.",
+ baseline_compliance_info=[])
+ return device_id_list
+ except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ raise err
+
+
+def get_device_ids_from_group_names(module, rest_obj):
+ try:
+ grp_name_list = module.params.get("device_group_names")
+ resp = rest_obj.get_all_report_details(group_service_path)
+ group_id_list = []
+ grp_list_resp = resp["report_list"]
+ if grp_list_resp:
+ for name in grp_name_list:
+ for group in grp_list_resp:
+ if group["Name"] == name:
+ group_id_list.append(group['Id'])
+ break
+ else:
+ module.exit_json(msg="Unable to fetch the specified device_group_names.",
+ baseline_compliance_info=[])
+ return get_device_ids_from_group_ids(module, group_id_list, rest_obj)
+ except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ raise err
+
+
+def get_identifiers(rest_obj, module):
+ if module.params.get("device_ids") is not None:
+ return module.params.get("device_ids"), "device_ids"
+ elif module.params.get("device_group_names") is not None:
+ return get_device_ids_from_group_names(module, rest_obj), "device_group_names"
+ else:
+ service_tags = module.params.get("device_service_tags")
+ service_tags_mapper = _get_device_id_from_service_tags(service_tags, rest_obj, module)
+ return list(service_tags_mapper.keys()), "device_service_tags"
+
+
+def get_baseline_id_from_name(rest_obj, module):
+ try:
+ baseline_name = module.params.get("baseline_name")
+ baseline_id = 0
+ if baseline_name is not None:
+ resp_val = rest_obj.get_all_items_with_pagination(base_line_path)
+ baseline_list = resp_val["value"]
+ if baseline_list:
+ for baseline in baseline_list:
+ if baseline["Name"] == baseline_name:
+ baseline_id = baseline["Id"]
+ break
+ else:
+ module.exit_json(msg="Specified baseline_name does not exist in the system.",
+ baseline_compliance_info=[])
+ else:
+ module.exit_json(msg="No baseline exists in the system.", baseline_compliance_info=[])
+ else:
+ module.fail_json(msg="baseline_name is a mandatory option.")
+ return baseline_id
+ except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ raise err
+
+
+def get_baselines_report_by_device_ids(rest_obj, module):
+ try:
+ device_ids, identifier = get_identifiers(rest_obj, module)
+ if device_ids or identifier == "device_ids":
+ resp = rest_obj.invoke_request('POST', baselines_report_by_device_ids_path, data={"Ids": device_ids})
+ return resp.json_data
+ else:
+ identifier_map = {
+ "device_group_names": "Device details not available as the group name(s) provided are invalid.",
+ "device_service_tags": "Device details not available as the service tag(s) provided are invalid."
+ }
+ message = identifier_map[identifier]
+ module.exit_json(msg=message)
+ except HTTPError as err:
+ err_message = json.load(err)
+ err_list = err_message.get('error', {}).get('@Message.ExtendedInfo', [{"Message": EXIT_MESSAGE}])
+ if err_list:
+ err_reason = err_list[0].get("Message", EXIT_MESSAGE)
+ if MSG_ID in err_list[0].get('MessageId'):
+ module.exit_json(msg=err_reason)
+ raise err
+ except (URLError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ raise err
+
+
+def get_baseline_compliance_reports(rest_obj, module):
+ try:
+ baseline_id = get_baseline_id_from_name(rest_obj, module)
+ path = baselines_compliance_report_path.format(Id=baseline_id)
+ resp_val = rest_obj.get_all_items_with_pagination(path)
+ resp_data = resp_val["value"]
+ return resp_data
+ except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ raise err
+
+
+def validate_inputs(module):
+ module_params = module.params
+ device_service_tags = module_params.get("device_service_tags")
+ device_group_names = module_params.get("device_group_names")
+ device_ids = module_params.get("device_ids")
+ baseline_name = module_params.get("baseline_name")
+ if all(not identifer for identifer in [device_ids, device_service_tags, device_group_names, baseline_name]):
+ module.fail_json(msg="one of the following is required: device_ids, device_service_tags, "
+ "device_group_names, baseline_name to generate device based compliance report.")
+
+
+def main():
+ specs = {
+ "baseline_name": {"type": 'str', "required": False},
+ "device_service_tags": {"required": False, "type": "list", "elements": 'str'},
+ "device_ids": {"required": False, "type": "list", "elements": 'int'},
+ "device_group_names": {"required": False, "type": "list", "elements": 'str'},
+ }
+ specs.update(ome_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ mutually_exclusive=[['baseline_name', 'device_service_tags', 'device_ids', 'device_group_names']],
+ required_one_of=[['device_ids', 'device_service_tags', 'device_group_names', 'baseline_name']],
+ supports_check_mode=True
+ )
+ try:
+ validate_inputs(module)
+ with RestOME(module.params, req_session=True) as rest_obj:
+ baseline_name = module.params.get("baseline_name")
+ if baseline_name is not None:
+ data = get_baseline_compliance_reports(rest_obj, module)
+ else:
+ data = get_baselines_report_by_device_ids(rest_obj, module)
+ if data:
+ module.exit_json(baseline_compliance_info=data)
+ else:
+ module.exit_json(msg="Unable to fetch the compliance baseline information.")
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except (URLError, SSLValidationError, ConnectionError, TypeError, ValueError, SSLError, OSError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py
new file mode 100644
index 000000000..a98359169
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.1.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_firmware_baseline_info
+short_description: Retrieves baseline details from OpenManage Enterprise
+version_added: "2.0.0"
+description:
+ - This module retrieves the list and details of all the baselines on OpenManage Enterprise.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ baseline_name:
+    description: Name of the baseline. If I(baseline_name) is not provided,
+      all the available firmware baselines are returned.
+ type: str
+requirements:
+ - "python >= 3.8.6"
+author: "Sajna Shetty(@Sajna-Shetty)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Retrieve details of all the available firmware baselines
+ dellemc.openmanage.ome_firmware_baseline_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Retrieve details of a specific firmware baseline identified by its baseline name
+ dellemc.openmanage.ome_firmware_baseline_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall baseline information.
+ returned: on error
+ sample: "Successfully fetched firmware baseline information."
+baseline_info:
+ type: dict
+ description: Details of the baselines.
+ returned: success
+ sample: {
+ "@odata.id": "/api/UpdateService/Baselines(239)",
+ "@odata.type": "#UpdateService.Baselines",
+ "CatalogId": 22,
+ "ComplianceSummary": {
+ "ComplianceStatus": "CRITICAL",
+ "NumberOfCritical": 1,
+ "NumberOfDowngrade": 0,
+ "NumberOfNormal": 0,
+ "NumberOfWarning": 0
+ },
+ "Description": "baseline_description",
+ "DeviceComplianceReports@odata.navigationLink": "/api/UpdateService/Baselines(239)/DeviceComplianceReports",
+ "DowngradeEnabled": true,
+ "Id": 239,
+ "Is64Bit": true,
+ "LastRun": "2020-05-22 16:42:40.307",
+ "Name": "baseline_name",
+ "RepositoryId": 12,
+ "RepositoryName": "HTTP DELL",
+ "RepositoryType": "DELL_ONLINE",
+ "Targets": [
+ {
+ "Id": 10342,
+ "Type": {
+ "Id": 1000,
+ "Name": "DEVICE"
+ }
+ }
+ ],
+ "TaskId": 41415,
+ "TaskStatusId": 2060
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+
+def get_specific_baseline(module, baseline_name, resp_data):
+    """Return the baseline entry from resp_data["value"] whose 'Name' matches.
+
+    Exits the module with an informational message and an empty
+    baseline_info when no entry matches (the for/else branch runs only
+    when the loop completes without a break).
+    """
+    baseline = None
+    for each in resp_data["value"]:
+        if each['Name'] == baseline_name:
+            baseline = each
+            break
+    else:
+        module.exit_json(msg="Unable to complete the operation because the requested baseline"
+                             " with name '{0}' does not exist.".format(baseline_name), baseline_info=[])
+    return baseline
+
+
+def main():
+    """Module entry point: retrieve all baselines, or one baseline by name."""
+    specs = {
+        "baseline_name": {"type": 'str', "required": False},
+    }
+    specs.update(ome_auth_params)
+    module = AnsibleModule(
+        argument_spec=specs,
+        supports_check_mode=True
+    )
+    try:
+        with RestOME(module.params, req_session=False) as rest_obj:
+            baseline_name = module.params.get("baseline_name")
+            resp = rest_obj.invoke_request('GET', "UpdateService/Baselines")
+            data = resp.json_data
+            # No baselines at all (and none specifically requested) is not an error.
+            if len(data["value"]) == 0 and not baseline_name:
+                module.exit_json(msg="No baselines present.", baseline_info=[])
+            if baseline_name is not None:
+                data = get_specific_baseline(module, baseline_name, data)
+            module.exit_json(msg="Successfully fetched firmware baseline information.", baseline_info=data)
+    except HTTPError as err:
+        if err.getcode() == 404:
+            module.fail_json(msg="404 Not Found.The requested resource is not available.")
+        module.fail_json(msg=str(err), error_info=json.load(err))
+    except URLError as err:
+        # An unreachable host is reported as 'unreachable' rather than 'failed'.
+        module.exit_json(msg=str(err), unreachable=True)
+    except (IOError, ValueError, SSLError, TypeError, ConnectionError, OSError) as err:
+        module.fail_json(msg=str(err))
+    except Exception as err:
+        module.fail_json(msg=str(err))
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py
new file mode 100644
index 000000000..29b7ed905
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py
@@ -0,0 +1,644 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_firmware_catalog
+short_description: Create, modify, or delete a firmware catalog on OpenManage Enterprise or OpenManage Enterprise Modular
+version_added: "2.0.0"
+description: This module allows to create, modify, or delete a firmware catalog on OpenManage Enterprise or OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ state:
+ description:
+ - C(present) creates or modifies a catalog.
+ - C(absent) deletes an existing catalog.
+ choices: [present, absent]
+ default: present
+ type: str
+ version_added: 3.4.0
+ catalog_name:
+ type: list
+ elements: str
+ description:
+ - Name of the firmware catalog to be created.
+ - This option is mutually exclusive with I(catalog_id).
+ - Provide the list of firmware catalog names that are supported when I(state) is C(absent).
+ new_catalog_name:
+ type: str
+ description:
+ - New name of the firmware catalog.
+ version_added: 3.4.0
+ catalog_id:
+ type: list
+ elements: int
+ description:
+ - ID of the catalog.
+ - This option is mutually exclusive with I(catalog_name).
+ - Provide the list of firmware catalog IDs that are supported when I(state) is C(absent).
+ version_added: 3.4.0
+ catalog_description:
+ type: str
+ description:
+ - Description for the catalog.
+ source:
+ type: str
+ description:
+ - The IP address of the system where the firmware catalog is stored on the local network.
+ - By default, this option is set to downloads.dell.com when I(repository_type) is C(DELL_ONLINE).
+ source_path:
+ type: str
+ description:
+ - Specify the complete path of the catalog file location without the file name.
+      - This option is ignored when I(repository_type) is C(DELL_ONLINE).
+ file_name:
+ type: str
+ description:
+ - Catalog file name associated with the I(source_path).
+ - This option is ignored when I(repository_type) is C(DELL_ONLINE).
+ repository_type:
+ type: str
+ description:
+      - Type of repository. The supported types are NFS, CIFS, HTTP, HTTPS, and DELL_ONLINE.
+ choices: ["NFS", "CIFS", "HTTP", "HTTPS", "DELL_ONLINE"]
+ repository_username:
+ type: str
+ description:
+ - User name of the repository where the catalog is stored.
+ - This option is mandatory when I(repository_type) is CIFS.
+ - This option is ignored when I(repository_type) is C(DELL_ONLINE).
+ repository_password:
+ type: str
+ description:
+ - Password to access the repository.
+ - This option is mandatory when I(repository_type) is CIFS.
+ - This option is ignored when I(repository_type) is C(DELL_ONLINE).
+ - C(NOTE) The module always reports the changed status, when this is provided.
+ repository_domain:
+ type: str
+ description:
+ - Domain name of the repository.
+ - This option is ignored when I(repository_type) is C(DELL_ONLINE).
+ check_certificate:
+ type: bool
+ description:
+      - If C(True), the certificate warnings are ignored when I(repository_type) is HTTPS.
+        If C(False), the certificate warnings are not ignored.
+ default: False
+ job_wait:
+ description:
+ - Provides the option to wait for job completion.
+ - This option is applicable when I(state) is C(present).
+ type: bool
+ default: true
+ version_added: 3.4.0
+ job_wait_timeout:
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(True).
+ type: int
+ default: 600
+ version_added: 3.4.0
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Sajna Shetty(@Sajna-Shetty)"
+ - "Jagadeesh N V(@jagadeeshnv)"
+notes:
+ - If I(repository_password) is provided, then the module always reports the changed status.
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Create a catalog from HTTPS repository
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ catalog_name: "catalog_name"
+ catalog_description: "catalog_description"
+ repository_type: "HTTPS"
+ source: "downloads.dell.com"
+ source_path: "catalog"
+ file_name: "catalog.gz"
+ check_certificate: True
+
+- name: Create a catalog from HTTP repository
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ catalog_name: "catalog_name"
+ catalog_description: "catalog_description"
+ repository_type: "HTTP"
+ source: "downloads.dell.com"
+ source_path: "catalog"
+ file_name: "catalog.gz"
+
+- name: Create a catalog using CIFS share
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ catalog_name: "catalog_name"
+ catalog_description: "catalog_description"
+ repository_type: "CIFS"
+ source: "192.167.0.1"
+ source_path: "cifs/R940"
+ file_name: "catalog.gz"
+ repository_username: "repository_username"
+ repository_password: "repository_password"
+ repository_domain: "repository_domain"
+
+- name: Create a catalog using NFS share
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ catalog_name: "catalog_name"
+ catalog_description: "catalog_description"
+ repository_type: "NFS"
+ source: "192.166.0.2"
+ source_path: "/nfs/R940"
+ file_name: "catalog.xml"
+
+- name: Create a catalog using repository from Dell.com
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ catalog_name: "catalog_name"
+ catalog_description: "catalog_description"
+ repository_type: "DELL_ONLINE"
+ check_certificate: True
+
+- name: Modify a catalog using a repository from CIFS share
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ catalog_name: "catalog_name"
+ catalog_description: "new catalog_description"
+ repository_type: "CIFS"
+ source: "192.167.0.2"
+ source_path: "cifs/R941"
+ file_name: "catalog1.gz"
+ repository_username: "repository_username"
+ repository_password: "repository_password"
+ repository_domain: "repository_domain"
+
+- name: Modify a catalog using a repository from Dell.com
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ catalog_id: 10
+ new_catalog_name: "new_catalog_name"
+ repository_type: "DELL_ONLINE"
+ catalog_description: "catalog_description"
+
+- name: Delete catalog using catalog name
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: absent
+ catalog_name: ["catalog_name1", "catalog_name2"]
+
+- name: Delete catalog using catalog id
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: absent
+ catalog_id: [11, 34]
+'''
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of the firmware catalog operation.
+ returned: always
+ type: str
+ sample: "Successfully triggered the job to create a catalog with Task ID : 10094"
+catalog_status:
+ description: Details of the catalog operation.
+ returned: When I(state) is C(present)
+ type: dict
+ sample: {
+ "AssociatedBaselines": [],
+ "BaseLocation": null,
+ "BundlesCount": 0,
+ "Filename": "catalog.gz",
+ "Id": 0,
+ "LastUpdated": null,
+ "ManifestIdentifier": null,
+ "ManifestVersion": null,
+ "NextUpdate": null,
+ "PredecessorIdentifier": null,
+ "ReleaseDate": null,
+ "ReleaseIdentifier": null,
+ "Repository": {
+ "CheckCertificate": true,
+ "Description": "HTTPS Desc",
+ "DomainName": null,
+ "Id": null,
+ "Name": "catalog4",
+ "Password": null,
+ "RepositoryType": "HTTPS",
+ "Source": "company.com",
+ "Username": null
+ },
+ "Schedule": null,
+ "SourcePath": "catalog",
+ "Status": null,
+ "TaskId": 10094
+ }
+job_id:
+ description: Job ID of the catalog task.
+ returned: When catalog job is in a running state
+ type: int
+ sample: 10123
+catalog_id:
+ description: IDs of the deleted catalog.
+ returned: When I(state) is C(absent)
+ type: int
+ sample: 10123
+associated_baselines:
+ description: IDs of the baselines associated with catalog.
+ returned: When I(state) is C(absent)
+ type: list
+ elements: dict
+ sample: [
+ {
+ "BaselineId": 24,
+ "BaselineName": "new"
+ },
+ {
+ "BaselineId": 25,
+ "BaselineName": "c7"
+ },
+ {
+ "BaselineId": 27,
+ "BaselineName": "c4"
+ }
+ ]
+error_info:
+ type: dict
+ description: Details of the http error.
+ returned: on http error
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to create or update the catalog because a
+ repository with the same name already exists.",
+ "Resolution": "Enter a different name and retry the operation.",
+ "Severity": "Critical"
+ }
+ ],
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information."
+ }
+ }
+
+'''
+
+JOB_URI = "JobService/Jobs({TaskId})"
+BASELINE_URI = "UpdateService/Baselines"
+CATALOG_URI = "UpdateService/Catalogs"
+CATALOG_URI_ID = "UpdateService/Catalogs({Id})"
+DELETE_CATALOG_URI = "UpdateService/Actions/UpdateService.RemoveCatalogs"
+CATALOG_JOB_RUNNING = "Catalog job '{name}' with ID {id} is running.Retry after job completion."
+CHECK_MODE_CHANGE_FOUND_MSG = "Changes found to be applied."
+CHECK_MODE_CHANGE_NOT_FOUND_MSG = "No changes found to be applied."
+INVALID_CATALOG_ID = "Invalid catalog ID provided."
+CATALOG_DEL_SUCCESS = "Successfully deleted the firmware catalog(s)."
+CATALOG_BASELINE_ATTACHED = "Unable to delete the catalog as it is with baseline(s)."
+CATALOG_EXISTS = "The catalog with the name '{new_name}' already exists in the system."
+DELL_ONLINE_EXISTS = "Catalog with 'DELL_ONLINE' repository already exists with the name '{catalog_name}'."
+NAMES_ERROR = "Only delete operations accept multiple catalog names or IDs."
+CATALOG_ID_NOT_FOUND = "Catalog with ID '{catalog_id}' not found."
+CATALOG_NAME_NOT_FOUND = "Catalog '{catalog_name}' not found."
+CATALOG_UPDATED = "Successfully {operation} the firmware catalog."
+JOB_POLL_INTERVAL = 10
+SETTLING_TIME = 3
+
+import json
+import time
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+
+def check_existing_catalog(module, rest_obj, state, name=None):
+    """Return (matching catalogs, {repo name: repo type} map of all catalogs).
+
+    When *name* is given it overrides the module's catalog_name/catalog_id
+    parameters. For state 'present' only the first match is collected; for
+    'absent' every match is collected. The name/type map is built only for
+    'present' (used for duplicate-name and DELL_ONLINE checks).
+    """
+    catalog_cfgs = []
+    if name:
+        catalog_id = None
+        catalog_name = [name]
+    else:
+        catalog_id = module.params.get("catalog_id")
+        catalog_name = module.params.get("catalog_name")
+    resp = rest_obj.get_all_items_with_pagination(CATALOG_URI)
+    catalogs_detail = resp.get("value")
+    all_catalog = {}
+    if state == "present":
+        all_catalog = dict(
+            [(each_catalog["Repository"]["Name"], each_catalog["Repository"]["RepositoryType"]) for each_catalog in
+             catalogs_detail])
+    for each_catalog in catalogs_detail:
+        if catalog_name:
+            if each_catalog['Repository']['Name'] in catalog_name:
+                catalog_cfgs.append(each_catalog)
+                if state == "present":
+                    break
+            continue
+        if catalog_id:
+            if each_catalog['Id'] in catalog_id:
+                catalog_cfgs.append(each_catalog)
+                if state == "present":
+                    break
+            continue
+    return catalog_cfgs, all_catalog
+
+
+def get_updated_catalog_info(module, rest_obj, catalog_resp):
+    """Re-read the catalog after an operation and return its latest details."""
+    try:
+        catalog, all_catalog = check_existing_catalog(module, rest_obj, "present", name=catalog_resp["Repository"]["Name"])
+    except Exception:
+        # Best effort: fall back to the response we already have.
+        # NOTE(review): the fallback assigns a dict, so catalog[0] below would
+        # raise KeyError on this path - confirm intended behavior.
+        catalog = catalog_resp
+    return catalog[0]
+
+
+def exit_catalog(module, rest_obj, catalog_resp, operation, msg):
+    """Optionally track the catalog job, then exit with the final catalog state.
+
+    When job_wait is set, the triggered-job message is replaced with the
+    completed-operation message; otherwise the caller's *msg* is reported.
+    """
+    if module.params.get("job_wait"):
+        job_failed, job_message = rest_obj.job_tracking(
+            catalog_resp.get('TaskId'), job_wait_sec=module.params["job_wait_timeout"], sleep_time=JOB_POLL_INTERVAL)
+        catalog = get_updated_catalog_info(module, rest_obj, catalog_resp)
+        if job_failed is True:
+            module.fail_json(msg=job_message, catalog_status=catalog)
+        catalog_resp = catalog
+        msg = CATALOG_UPDATED.format(operation=operation)
+    # Give the appliance a moment to settle before the final read.
+    time.sleep(SETTLING_TIME)
+    catalog = get_updated_catalog_info(module, rest_obj, catalog_resp)
+    module.exit_json(msg=msg, catalog_status=catalog, changed=True)
+
+
+def _get_catalog_payload(params, name):
+    """Build the catalog create/modify request payload from module parameters.
+
+    None-valued repository fields are stripped so only user-supplied values
+    are sent to the appliance.
+    """
+    catalog_payload = {}
+    repository_type = params.get("repository_type")
+    if params.get("file_name") is not None:
+        catalog_payload["Filename"] = params["file_name"]
+    if params.get("source_path") is not None:
+        catalog_payload["SourcePath"] = params["source_path"]
+    repository_dict = {
+        "Name": name,
+        "Description": params.get("catalog_description"),
+        "RepositoryType": repository_type,
+        "Source": params.get("source"),
+        "CheckCertificate": params.get("check_certificate"),
+    }
+    # Credentials and domain do not apply to the Dell online repository.
+    if repository_type != "DELL_ONLINE":
+        repository_dict.update({"DomainName": params.get("repository_domain"),
+                                "Username": params.get("repository_username"),
+                                "Password": params.get("repository_password")
+                                })
+    # DELL_ONLINE defaults to downloads.dell.com when no source is given.
+    if repository_type == "DELL_ONLINE" and not params.get("source"):
+        repository_dict["Source"] = "downloads.dell.com"
+    repository_payload = dict([(k, v) for k, v in repository_dict.items() if v is not None])
+    if repository_payload:
+        catalog_payload["Repository"] = repository_payload
+    return catalog_payload
+
+
+def validate_dell_online(all_catalog, module):
+    """Fail if a DELL_ONLINE catalog already exists under a different name.
+
+    Only one catalog with the DELL_ONLINE repository type can be created
+    on OpenManage Enterprise.
+    """
+    catalog_name = module.params["catalog_name"][0]
+    for name, repo_type in all_catalog.items():
+        if repo_type == "DELL_ONLINE" and name != catalog_name:
+            module.fail_json(
+                msg=DELL_ONLINE_EXISTS.format(
+                    catalog_name=name))
+
+
+def create_catalog(module, rest_obj):
+    """Create a new catalog; in check mode, report the pending change instead."""
+    if module.check_mode:
+        module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True)
+    payload = _get_catalog_payload(module.params, module.params.get("catalog_name")[0])
+    resp = rest_obj.invoke_request("POST", CATALOG_URI, data=payload)
+    resp_data = resp.json_data
+    job_id = resp_data.get("TaskId")
+    msg = "Successfully triggered the job to create a catalog with Task Id : {0}".format(job_id)
+    exit_catalog(module, rest_obj, resp_data, "created", msg)
+
+
+def get_current_catalog_settings(current_payload):
+    """Extract the comparable settings from an existing catalog response.
+
+    None-valued repository fields are dropped so the result can be diffed
+    against a freshly built modify payload.
+    """
+    catalog_payload = {}
+    if current_payload.get("Filename") is not None:
+        catalog_payload["Filename"] = current_payload["Filename"]
+    if current_payload.get("SourcePath") is not None:
+        catalog_payload["SourcePath"] = current_payload["SourcePath"]
+    repository_dict = {
+        "Name": current_payload["Repository"].get("Name"),
+        "Id": current_payload["Repository"].get("Id"),
+        "Description": current_payload["Repository"].get("Description"),
+        "RepositoryType": current_payload["Repository"].get("RepositoryType"),
+        "Source": current_payload["Repository"].get("Source"),
+        "DomainName": current_payload["Repository"].get("DomainName"),
+        "Username": current_payload["Repository"].get("Username"),
+        "Password": current_payload["Repository"].get("Password"),
+        "CheckCertificate": current_payload["Repository"].get("CheckCertificate"),
+    }
+    repository_payload = dict([(k, v) for k, v in repository_dict.items() if v is not None])
+    if repository_payload:
+        catalog_payload["Repository"] = repository_payload
+    return catalog_payload
+
+
+def compare_payloads(modify_payload, current_payload):
+    """
+    :param modify_payload: payload created to update existing setting
+    :param current_payload: already existing payload for specified baseline
+    :return: bool - True if any requested value differs from (or is missing
+        in) the current settings, i.e. a modification is required;
+        False when the payloads already match.
+    """
+    diff = False
+    for key, val in modify_payload.items():
+        if current_payload is None or current_payload.get(key) is None:
+            return True
+        elif isinstance(val, dict):
+            # Recurse into nested payloads (e.g. the Repository sub-dict).
+            if compare_payloads(val, current_payload.get(key)):
+                return True
+        elif val != current_payload.get(key):
+            return True
+    return diff
+
+
+def modify_catalog(module, rest_obj, catalog_list, all_catalog):
+    """Modify an existing catalog; exits unchanged when no difference is found.
+
+    Fails when the new name collides with another catalog or when the
+    repository type would change.
+    """
+    params = module.params
+    catalog_id = catalog_list[0]["Id"]
+    name = catalog_list[0]["Repository"]["Name"]
+    modify_payload = _get_catalog_payload(module.params, name)
+    new_catalog_name = params.get("new_catalog_name")
+    if new_catalog_name:
+        # Renaming must not collide with another existing catalog.
+        if new_catalog_name != name and new_catalog_name in all_catalog:
+            module.fail_json(msg=CATALOG_EXISTS.format(new_name=new_catalog_name))
+        modify_payload["Repository"]["Name"] = new_catalog_name
+    catalog_payload = get_current_catalog_settings(catalog_list[0])
+    if modify_payload.get("Repository") and \
+            modify_payload.get("Repository").get("RepositoryType") and \
+            modify_payload.get("Repository").get("RepositoryType") != catalog_payload["Repository"]["RepositoryType"]:
+        module.fail_json(msg="Repository type cannot be changed to another repository type.")
+    new_catalog_current_setting = catalog_payload.copy()
+    # The repository Id is not user-settable; exclude it from the diff and
+    # restore it in the final payload.
+    repo_id = new_catalog_current_setting["Repository"]["Id"]
+    del new_catalog_current_setting["Repository"]["Id"]
+    diff = compare_payloads(modify_payload, new_catalog_current_setting)
+    if module.check_mode and diff:
+        module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True)
+    if not diff:
+        module.exit_json(msg=CHECK_MODE_CHANGE_NOT_FOUND_MSG, changed=False)
+    # Merge the requested changes over the current settings for the PUT body.
+    new_catalog_current_setting["Repository"].update(modify_payload["Repository"])
+    catalog_payload.update(modify_payload)
+    catalog_payload["Repository"] = new_catalog_current_setting["Repository"]
+    catalog_payload["Repository"]["Id"] = repo_id
+    catalog_payload["Id"] = catalog_id
+    catalog_put_uri = CATALOG_URI_ID.format(Id=catalog_id)
+    resp = rest_obj.invoke_request('PUT', catalog_put_uri, data=catalog_payload)
+    resp_data = resp.json_data
+    job_id = resp_data.get("TaskId")
+    msg = "Successfully triggered the job to update a catalog with Task Id : {0}".format(job_id)
+    exit_catalog(module, rest_obj, resp_data, "modified", msg)
+
+
+def validate_delete_operation(rest_obj, module, catalog_list, delete_ids):
+    """Verify the catalogs can be deleted; exit or fail early when they cannot.
+
+    Fails when a catalog still has associated baselines, or when its catalog
+    job is still running (LastRunStatus Id 2050). Exits without change when
+    nothing matched, and reports the pending change in check mode.
+    """
+    associated_baselines = []
+    for catalog in catalog_list:
+        if catalog.get('AssociatedBaselines'):
+            associated_baselines.append({"catalog_id": catalog["Id"],
+                                         "associated_baselines": catalog.get("AssociatedBaselines")})
+        if catalog.get('Status') != "Completed":
+            resp = rest_obj.invoke_request("GET", JOB_URI.format(TaskId=catalog['TaskId']))
+            job_data = resp.json_data
+            if job_data['LastRunStatus']['Id'] == 2050:
+                module.fail_json(msg=CATALOG_JOB_RUNNING.format(name=catalog["Name"], id=catalog["Id"]),
+                                 job_id=catalog['TaskId'])
+    if associated_baselines:
+        module.fail_json(msg=CATALOG_BASELINE_ATTACHED, associated_baselines=associated_baselines)
+    if module.check_mode and len(catalog_list) > 0:
+        module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True, catalog_id=delete_ids)
+    if len(catalog_list) == 0:
+        module.exit_json(msg=CHECK_MODE_CHANGE_NOT_FOUND_MSG, changed=False)
+
+
+def delete_catalog(module, rest_obj, catalog_list):
+    """Delete the matched catalogs after validating the operation is allowed."""
+    delete_ids = [d["Id"] for d in catalog_list]
+    validate_delete_operation(rest_obj, module, catalog_list, delete_ids)
+    delete_payload = {"CatalogIds": delete_ids}
+    rest_obj.invoke_request('POST', DELETE_CATALOG_URI, data=delete_payload)
+    module.exit_json(msg=CATALOG_DEL_SUCCESS, changed=True, catalog_id=delete_ids)
+
+
+def validate_names(state, module):
+    """
+    Fail when state 'present' is given more than one catalog name or ID;
+    only the delete operation (state 'absent') accepts multiples.
+    """
+    catalog_name = module.params.get("catalog_name", [])
+    catalog_id = module.params.get("catalog_id", [])
+    if state != "absent" and ((catalog_name and len(catalog_name) > 1) or (catalog_id and len(catalog_id) > 1)):
+        module.fail_json(msg=NAMES_ERROR)
+
+
+def perform_present_action(module, rest_obj, requested_catalog_list, all_catalog):
+    """Modify the matched catalog, or create a new one when no match exists."""
+    if requested_catalog_list:
+        modify_catalog(module, rest_obj, requested_catalog_list, all_catalog)
+    else:
+        # Creation by ID is not possible; an unmatched ID is an error.
+        if module.params.get('catalog_id'):
+            module.fail_json(msg=INVALID_CATALOG_ID)
+        repository_type = module.params.get("repository_type")
+        if repository_type and repository_type == "DELL_ONLINE":
+            validate_dell_online(all_catalog, module)
+        create_catalog(module, rest_obj)
+
+
+def main():
+    """Module entry point: build the argument spec and route to create/modify/delete."""
+    specs = {
+        "state": {"default": "present", "choices": ['present', 'absent']},
+        "catalog_name": {"type": 'list', "elements": 'str'},
+        "new_catalog_name": {"type": 'str'},
+        "catalog_id": {"type": 'list', "elements": 'int'},
+        "catalog_description": {"required": False, "type": 'str'},
+        "source": {"required": False, "type": 'str'},
+        "source_path": {"required": False, "type": 'str'},
+        "file_name": {"required": False, "type": 'str'},
+        "repository_type": {"required": False,
+                            "choices": ["NFS", "CIFS", "HTTP", "HTTPS", "DELL_ONLINE"]},
+        "repository_username": {"required": False, "type": 'str'},
+        "repository_password": {"required": False, "type": 'str', "no_log": True},
+        "repository_domain": {"required": False, "type": 'str'},
+        "check_certificate": {"required": False, "type": 'bool', "default": False},
+        "job_wait": {"type": 'bool', "default": True},
+        "job_wait_timeout": {"type": 'int', "default": 600}
+    }
+    specs.update(ome_auth_params)
+    module = AnsibleModule(
+        argument_spec=specs,
+        # state=present requires repository_type, and at least one of the
+        # listed catalog options must also be supplied.
+        required_if=[
+            ['state', 'present',
+             ['repository_type'], False],
+            ['state', 'present',
+             ['new_catalog_name', 'catalog_description', 'catalog_name', 'catalog_id', 'source', 'source_path',
+              'file_name', 'repository_type', 'repository_username', 'repository_password',
+              'repository_domain', 'check_certificate'], True],
+        ],
+        mutually_exclusive=[('catalog_name', 'catalog_id')],
+        required_one_of=[('catalog_name', 'catalog_id')],
+        supports_check_mode=True)
+
+    try:
+        with RestOME(module.params, req_session=True) as rest_obj:
+            state = module.params['state']
+            validate_names(state, module)
+            requested_catalog_list, all_catalog = check_existing_catalog(module, rest_obj, state)
+            if state == 'absent':
+                delete_catalog(module, rest_obj, requested_catalog_list)
+            else:
+                perform_present_action(module, rest_obj, requested_catalog_list, all_catalog)
+    except HTTPError as err:
+        module.fail_json(msg=str(err), error_info=json.load(err))
+    except URLError as err:
+        # An unreachable host is reported as 'unreachable' rather than 'failed'.
+        module.exit_json(msg=str(err), unreachable=True)
+    except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
+        module.fail_json(msg=str(err))
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py
new file mode 100644
index 000000000..411a6221a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py
@@ -0,0 +1,452 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_groups
+short_description: Manages static device groups on OpenManage Enterprise
+description: This module allows to create, modify, and delete static device groups on OpenManage Enterprise.
+version_added: "3.5.0"
+author:
+ - Jagadeesh N V(@jagadeeshnv)
+extends_documentation_fragment:
+ - dellemc.openmanage.oment_auth_options
+options:
+ state:
+ type: str
+ description:
+ - C(present) allows to create or modify a device group.
+ - C(absent) allows to delete a device group.
+ choices: [present, absent]
+ default: present
+ name:
+ type: list
+ elements: str
+ description:
+ - Name of the device group to be created, modified, or deleted.
+ - If I(state) is absent, multiple names can be provided.
+ - This option is case insensitive.
+ - This option is mutually exclusive with I(group_id).
+ group_id:
+ type: list
+ elements: int
+ description:
+ - ID of the device group to be created, modified, or deleted.
+ - If I(state) is absent, multiple IDs can be provided.
+ - This option is mutually exclusive with I(name).
+ new_name:
+ type: str
+ description:
+ - New name for the existing device group.
+ - This is applicable only when I(state) is C(present).
+ description:
+ type: str
+ description:
+ - Description for the device group.
+ - This is applicable only when I(state) is C(present).
+ parent_group_name:
+ type: str
+ default: "Static Groups"
+ description:
+ - Name of the parent device group under which the device group to be created or modified.
+ - This is applicable only when I(state) is C(present).
+ - C(NOTE) If device group with such a name does not exist, device group with I(parent_group_name) is created.
+ - This option is case insensitive.
+ - This option is mutually exclusive with I(parent_group_id).
+ parent_group_id:
+ type: int
+ description:
+ - ID of the parent device group under which the device group to be created or modified.
+ - This is applicable only when I(state) is C(present).
+ - This option is mutually exclusive with I(parent_group_name).
+requirements:
+ - "python >= 3.8.6"
+notes:
+ - This module manages only static device groups on Dell EMC OpenManage Enterprise.
+ - If a device group with the name I(parent_group_name) does not exist, a new device group with the same name is created.
+ - Make sure the entered parent group is not the descendant of the provided group.
+ - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Create a new device group
+ dellemc.openmanage.ome_groups:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "group 1"
+ description: "Group 1 description"
+ parent_group_name: "group parent 1"
+
+- name: Modify a device group using the group ID
+ dellemc.openmanage.ome_groups:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ group_id: 1234
+ description: "Group description updated"
+ parent_group_name: "group parent 2"
+
+- name: Delete a device group using the device group name
+ dellemc.openmanage.ome_groups:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: absent
+ name: "group 1"
+
+- name: Delete multiple device groups using the group IDs
+ dellemc.openmanage.ome_groups:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: absent
+ group_id:
+ - 1234
+ - 5678
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall status of the device group operation.
+ returned: always
+ sample: "Successfully deleted the device group(s)."
+group_status:
+ description: Details of the device group operation status.
+ returned: success
+ type: dict
+ sample: {
+ "Description": "my group description",
+ "Id": 12123,
+ "MembershipTypeId": 12,
+ "Name": "group 1",
+ "ParentId": 12345,
+ "TypeId": 3000,
+ "IdOwner": 30,
+ "CreatedBy": "admin",
+ "CreationTime": "2021-01-01 10:10:10.100",
+ "DefinitionDescription": "UserDefined",
+ "DefinitionId": 400,
+ "GlobalStatus": 5000,
+ "HasAttributes": false,
+ "UpdatedBy": "",
+ "UpdatedTime": "2021-01-01 11:11:10.100",
+ "Visible": true
+ }
+group_ids:
+ type: list
+ elements: int
+ description: List of the deleted device group IDs.
+ returned: when I(state) is C(absent)
+ sample: [1234, 5678]
+invalid_groups:
+ type: list
+ elements: str
+ description: List of the invalid device group IDs or names.
+ returned: when I(state) is C(absent)
+ sample: [1234, 5678]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGRP9013",
+ "RelatedProperties": [],
+ "Message": "Unable to update group 12345 with the provided parent 54321 because a group/parent
+ relationship already exists.",
+ "MessageArgs": [
+ "12345",
+ "54321"
+ ],
+ "Severity": "Warning",
+ "Resolution": "Make sure the entered parent ID does not create a bidirectional relationship and retry
+ the operation."
+ }
+ ]
+ }
+}
+"""
+
+import json
+import time
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+
+GROUP_URI = "GroupService/Groups"
+OP_URI = "GroupService/Actions/GroupService.{op}Group"
+# GROUPS_HIERARCHY = "GroupService/AllGroupsHierarchy"
+MULTIPLE_GROUPS_MSG = "Provide only one unique device group when state is present."
+NONEXIST_GROUP_ID = "A device group with the provided ID does not exist."
+NONEXIST_PARENT_ID = "A parent device group with the provided ID does not exist."
+INVALID_PARENT = "The provided parent device group is not a valid user-defined static device group."
+INVALID_GROUPS_DELETE = "Provide valid static device group(s) for deletion."
+INVALID_GROUPS_MODIFY = "Provide valid static device group for modification."
+PARENT_CREATION_FAILED = "Unable to create a parent device group with the name {pname}."
+CREATE_SUCCESS = "Successfully {op}d the device group."
+GROUP_PARENT_SAME = "Provided parent and the device group cannot be the same."
+GROUP_NAME_EXISTS = "Unable to rename the group because a group with the provided name '{gname}' already exists."
+DELETE_SUCCESS = "Successfully deleted the device group(s)."
+NO_CHANGES_MSG = "No changes found to be applied."
+CHANGES_FOUND = "Changes found to be applied."
+STATIC_ROOT = 'Static Groups'
+SETTLING_TIME = 2
+
+
def get_valid_groups(module, rest_obj, group_arg, group_set):
    """Fetch all groups in one paginated call and pick out the pieces needed.

    Returns a tuple (group_dict, parent, static_root).  For state 'absent',
    group_dict maps lowercased name/id -> group for every requested group
    found; otherwise it is the single matching group dict (or {}), and the
    requested parent group and the built-in static root are also located.
    """
    parent, static_root, group_dict = {}, {}, {}
    all_groups = rest_obj.get_all_items_with_pagination(GROUP_URI).get("value")
    if module.params.get('state') == 'absent':
        group_dict = {str(grp[group_arg]).lower(): grp for grp in all_groups
                      if str(grp[group_arg]).lower() in group_set}
    else:
        parent_value = module.params.get('parent_group_id')
        if parent_value:  # id takes precedence; name always has a default
            parent_key = 'Id'
        else:
            parent_key = 'Name'
            parent_value = module.params.get('parent_group_name')
        matches = 0
        for grp in all_groups:
            if str(grp[group_arg]).lower() in group_set:
                group_dict = grp
                matches += 1
            if str(grp[parent_key]).lower() == str(parent_value).lower():
                parent = grp
                matches += 1
            if grp['Name'] == STATIC_ROOT:
                static_root = grp
                matches += 1
            if matches == 3:
                # everything found; no need to scan the rest
                break
    return group_dict, parent, static_root
+
+
def is_valid_static_group(grp):
    """Return True when the group is a user-defined static device group
    (TypeId 3000 with static MembershipTypeId 12)."""
    return grp['TypeId'] == 3000 and grp['MembershipTypeId'] == 12
+
+
def create_parent(rest_obj, module, static_root):
    """Create a static parent group under the static root.

    Best-effort: on any failure the static root's own Id is returned so the
    caller can still place the new group somewhere valid.
    """
    try:
        group_model = {
            'MembershipTypeId': 12,  # Static members
            'Name': module.params.get('parent_group_name'),
            'ParentId': static_root['Id'],
        }
        resp = rest_obj.invoke_request('POST', OP_URI.format(op='Create'),
                                       data={"GroupModel": group_model})
        return int(resp.json_data)
    except Exception:
        # deliberately silent fallback to the static root
        return static_root['Id']
+
+
def get_parent_id(rest_obj, module, parent, static_root):
    """Resolve the parent group Id for a create/modify operation.

    An explicit parent_group_id must already exist and (unless it is the
    static root itself) be a valid user-defined static group.  Otherwise the
    parent_group_name is used; when no group with that name exists, one is
    created (or 0 is returned in check mode).  Note that when the resolved
    parent IS the static root, execution falls through both branches to the
    final ``return static_root['Id']``.
    """
    parent_id = module.params.get("parent_group_id")
    if parent_id:  # Checking id first as name has a default value
        if not parent:
            module.fail_json(msg=NONEXIST_PARENT_ID)
        if parent['Name'] != STATIC_ROOT:
            if not is_valid_static_group(parent):
                module.fail_json(msg=INVALID_PARENT)
            return parent['Id']
    else:
        if parent:
            if parent['Name'] != STATIC_ROOT:
                if not is_valid_static_group(parent):
                    module.fail_json(msg=INVALID_PARENT)
                return parent['Id']
        else:
            if module.check_mode:
                # check mode must not create anything; 0 is a placeholder Id
                return 0
            else:
                prtid = create_parent(rest_obj, module, static_root)
                time.sleep(SETTLING_TIME)  # let OME settle before using the new group
                return prtid
    # parent resolved to the static root (explicitly, by name, or by default)
    return static_root['Id']
+
+
def get_ome_group_by_name(rest_obj, name):
    """Look up a single group by exact name via an OData $filter.

    Returns the first matching group dict, or {} when there is no match or
    the request fails for any reason (best-effort lookup).
    """
    try:
        query = {"$filter": "Name eq '{0}'".format(name)}
        resp = rest_obj.invoke_request("GET", GROUP_URI, query_param=query)
        matches = resp.json_data.get('value')
        if matches:
            return matches[0]
    except Exception:
        pass
    return {}
+
+
def get_ome_group_by_id(rest_obj, id):
    """Fetch a single group by its numeric Id; return {} on any failure."""
    try:
        response = rest_obj.invoke_request('GET', "{0}({1})".format(GROUP_URI, id))
        return response.json_data
    except Exception:
        return {}
+
+
def exit_group_operation(module, rest_obj, payload, operation):
    """Run a Create/Update group action, read the resulting group back and
    exit the module reporting the final group details."""
    resp = rest_obj.invoke_request('POST', OP_URI.format(op=operation),
                                   data={"GroupModel": payload})
    group_id = int(resp.json_data)
    time.sleep(SETTLING_TIME)  # give OME time to settle before reading back
    try:
        fetched = get_ome_group_by_id(rest_obj, group_id)
        group = rest_obj.strip_substr_dict(fetched)
    except Exception:
        # fall back to echoing the request payload with the new Id attached
        payload['Id'] = group_id
        group = payload
    module.exit_json(changed=True, msg=CREATE_SUCCESS.format(op=operation.lower()),
                     group_status=group)
+
+
def create_group(rest_obj, module, parent, static_root):
    """Create a new static group under the resolved parent and exit."""
    mparams = module.params
    payload = {'MembershipTypeId': 12,  # Static members
               'Name': mparams.get('name')[0]}
    if mparams.get('parent_group_name').lower() == payload['Name'].lower():
        module.fail_json(msg=GROUP_PARENT_SAME)
    payload['ParentId'] = get_parent_id(rest_obj, module, parent, static_root)
    description = mparams.get('description')
    if description:
        payload['Description'] = description
    if module.check_mode:
        module.exit_json(changed=True, msg=CHANGES_FOUND)
    exit_group_operation(module, rest_obj, payload, 'Create')
+
+
def modify_group(rest_obj, module, valid_group_dict, parent, static_root):
    """Update an existing static group's name, description and/or parent.

    Exits with NO_CHANGES_MSG when the request matches the current state,
    with CHANGES_FOUND in check mode, otherwise performs the Update action.
    """
    if not is_valid_static_group(valid_group_dict):
        module.fail_json(msg=INVALID_GROUPS_MODIFY)
    grp = valid_group_dict
    diff = 0  # number of fields that actually change
    payload = dict([(k, grp.get(k)) for k in ["Name", "Description", "MembershipTypeId", "ParentId", "Id"]])
    new_name = module.params.get('new_name')
    if new_name:
        if new_name != payload['Name']:
            # renaming: the new name must not collide with another group
            dup_grp = get_ome_group_by_name(rest_obj, new_name)
            if dup_grp:
                module.fail_json(msg=GROUP_NAME_EXISTS.format(gname=new_name))
            payload['Name'] = new_name
            diff += 1
    desc = module.params.get('description')
    if desc:
        if desc != payload['Description']:
            payload['Description'] = desc
            diff += 1
    parent_id = get_parent_id(rest_obj, module, parent, static_root)
    if parent_id == payload['Id']:
        # a group cannot be its own parent
        module.fail_json(msg=GROUP_PARENT_SAME)
    if parent_id != payload['ParentId']:
        payload['ParentId'] = parent_id
        diff += 1
    if diff == 0:
        # idempotent request: report the current state without changes
        gs = rest_obj.strip_substr_dict(grp)
        module.exit_json(msg=NO_CHANGES_MSG, group_status=gs)
    if module.check_mode:
        module.exit_json(changed=True, msg=CHANGES_FOUND)
    exit_group_operation(module, rest_obj, payload, 'Update')
+
+
def delete_groups(rest_obj, module, group_set, group_dict):
    """Delete the requested static groups, rejecting any that are missing
    or not user-defined static groups (Query Groups use MembershipTypeId 24)."""
    deletable_ids = []
    invalid_names = []
    for requested in group_set:
        found = group_dict.get(str(requested).lower())
        if found and is_valid_static_group(found):
            deletable_ids.append(found['Id'])
        else:
            invalid_names.append(requested)
    if invalid_names:
        module.fail_json(msg=INVALID_GROUPS_DELETE, invalid_groups=invalid_names)
    if module.check_mode:
        module.exit_json(changed=True, msg=CHANGES_FOUND, group_ids=deletable_ids)
    rest_obj.invoke_request("POST", OP_URI.format(op='Delete'), data={"GroupIds": deletable_ids})
    module.exit_json(changed=True, msg=DELETE_SUCCESS, group_ids=deletable_ids)
+
+
def main():
    """Entry point: build the argument spec, normalize the requested group
    set and dispatch to delete/modify/create."""
    specs = {
        "name": {"type": "list", "elements": 'str'},
        "group_id": {"type": "list", "elements": 'int'},
        "state": {"type": "str", "choices": ["present", "absent"], "default": "present"},
        "description": {"type": "str"},
        "new_name": {"type": "str"},
        "parent_group_name": {"type": "str", "default": STATIC_ROOT},
        "parent_group_id": {"type": "int"},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[
            # when present, at least one of these options must be supplied
            ["state", "present", ("new_name", "description", "parent_group_name", "parent_group_id"), True],
        ],
        mutually_exclusive=[
            ("name", "group_id"), ("parent_group_name", "parent_group_id"),
        ],
        required_one_of=[("name", "group_id")],
        supports_check_mode=True
    )

    try:
        # Normalize the requested groups to a lowercase set of names or ids.
        if module.params.get('name'):
            group_arg = 'Name'
            group_set = set(v.lower() for v in module.params.get('name'))
        else:
            group_arg = 'Id'
            group_set = set(str(v).lower() for v in module.params.get('group_id'))
        # Create/modify operates on exactly one group; delete accepts many.
        if len(group_set) != 1 and module.params['state'] == 'present':
            module.fail_json(msg=MULTIPLE_GROUPS_MSG)
        with RestOME(module.params, req_session=True) as rest_obj:
            valid_group_dict, parent, static_root = get_valid_groups(module, rest_obj, group_arg, group_set)
            if module.params["state"] == "absent":
                if valid_group_dict:
                    delete_groups(rest_obj, module, group_set, valid_group_dict)
                module.exit_json(msg=NO_CHANGES_MSG)
            else:
                if valid_group_dict:
                    modify_group(rest_obj, module, valid_group_dict, parent, static_root)
                elif group_arg == 'Id':
                    # a non-existent Id cannot be created on the fly
                    module.fail_json(msg=NONEXIST_GROUP_ID)
                create_group(rest_obj, module, parent, static_root)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # report unreachable hosts without marking the task failed
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
        module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py
new file mode 100644
index 000000000..4906dcf55
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py
@@ -0,0 +1,603 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.1.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_identity_pool
+short_description: Manages identity pool settings on OpenManage Enterprise
+version_added: "2.1.0"
+description: This module allows to create, modify, or delete a single identity pool on OpenManage Enterprise.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ state:
+ description:
+      - C(present) modifies an existing identity pool. If the provided I(pool_name) does not exist,
+        it creates an identity pool.
+ - C(absent) deletes an existing identity pool.
+ type: str
+ default: present
+ choices: [present, absent]
+ pool_name:
+ type: str
+ required: True
+ description:
+ - This option is mandatory for I(state) when creating, modifying and deleting an identity pool.
+ new_pool_name:
+ type: str
+ description:
+ - After creating an identity pool, I(pool_name) can be changed to I(new_pool_name).
+ - This option is ignored when creating an identity pool.
+ pool_description:
+ type: str
+ description:
+ - Description of the identity pool.
+ ethernet_settings:
+ type: dict
+ description:
+ - Applicable for creating and modifying an identity pool using Ethernet settings.
+ - I(starting_mac_address) and I(identity_count) are required to create an identity pool.
+ suboptions:
+ starting_mac_address:
+ description: Starting MAC address of the ethernet setting.
+ type: str
+ identity_count:
+ description: Number of MAC addresses.
+ type: int
+ fcoe_settings:
+ type: dict
+ description:
+ - Applicable for creating and modifying an identity pool using FCoE settings.
+ - I(starting_mac_address) and I(identity_count) are required to create an identity pool.
+ suboptions:
+ starting_mac_address:
+ description: Starting MAC Address of the FCoE setting.
+ type: str
+ identity_count:
+ description: Number of MAC addresses.
+ type: int
+ iscsi_settings:
+ type: dict
+ description:
+ - Applicable for creating and modifying an identity pool using ISCSI settings.
+ - I(starting_mac_address), I(identity_count), I(iqn_prefix), I(ip_range) and I(subnet_mask) are
+ required to create an identity pool.
+ suboptions:
+ starting_mac_address:
+        description: Starting MAC address of the iSCSI setting. This is a required option for the iSCSI setting.
+ type: str
+ identity_count:
+ description: Number of MAC addresses.
+ type: int
+ initiator_config:
+ type: dict
+ description:
+ - Applicable for creating and modifying an identity pool using iSCSI Initiator settings.
+ suboptions:
+ iqn_prefix:
+ description: IQN prefix addresses.
+ type: str
+ initiator_ip_pool_settings:
+ type: dict
+ description:
+ - Applicable for creating and modifying an identity pool using ISCSI Initiator IP pool settings.
+ suboptions:
+ ip_range:
+ description: Range of non-multicast IP addresses.
+ type: str
+ subnet_mask:
+ description: Subnet mask for I(ip_range).
+ type: str
+ gateway:
+ description: IP address of gateway.
+ type: str
+ primary_dns_server:
+ description: IP address of the primary DNS server.
+ type: str
+ secondary_dns_server:
+ description: IP address of the secondary DNS server.
+ type: str
+ fc_settings:
+ type: dict
+ description:
+ - Applicable for creating and modifying an identity pool using fibre channel(FC) settings.
+ - This option allows OpenManage Enterprise to generate a Worldwide port name (WWPN) and Worldwide node name (WWNN) address.
+      - The value 0x2001 is prefixed to the starting address for the generation of a WWPN, and 0x2000 for a WWNN.
+ - I(starting_address) and I(identity_count) are required to create an identity pool.
+ suboptions:
+ starting_address:
+        description: Starting MAC address of the FC setting. I(starting_address) is a required option to create FC settings.
+ type: str
+ identity_count:
+        description: Number of MAC addresses. I(identity_count) is a required option to create FC settings.
+ type: int
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Sajna Shetty(@Sajna-Shetty)"
+ - "Deepak Joshi(@Dell-Deepak-Joshi))"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Create an identity pool using ethernet, FCoE, iSCSI and FC settings
+ dellemc.openmanage.ome_identity_pool:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: present
+ pool_name: "pool1"
+ pool_description: "Identity pool with Ethernet, FCoE, iSCSI and FC settings"
+ ethernet_settings:
+ starting_mac_address: "50:50:50:50:50:00"
+ identity_count: 60
+ fcoe_settings:
+ starting_mac_address: "70:70:70:70:70:00"
+ identity_count: 75
+ iscsi_settings:
+ starting_mac_address: "60:60:60:60:60:00"
+ identity_count: 30
+ initiator_config:
+ iqn_prefix: "iqn.myprefix."
+ initiator_ip_pool_settings:
+ ip_range: "10.33.0.1-10.33.0.255"
+ subnet_mask: "255.255.255.0"
+ gateway: "192.168.4.1"
+ primary_dns_server : "10.8.8.8"
+ secondary_dns_server : "8.8.8.8"
+ fc_settings:
+ starting_address: "30:30:30:30:30:00"
+ identity_count: 45
+
+- name: Create an identity pool using only ethernet settings
+ dellemc.openmanage.ome_identity_pool:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ pool_name: "pool2"
+ pool_description: "create identity pool with ethernet"
+ ethernet_settings:
+ starting_mac_address: "aa-bb-cc-dd-ee-aa"
+ identity_count: 80
+
+- name: Modify an identity pool
+ dellemc.openmanage.ome_identity_pool:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ pool_name: "pool2"
+ new_pool_name: "pool3"
+ pool_description: "modifying identity pool with ethernet and fcoe settings"
+ ethernet_settings:
+ starting_mac_address: "90-90-90-90-90-90"
+ identity_count: 61
+ fcoe_settings:
+ starting_mac_address: "aabb.ccdd.5050"
+ identity_count: 77
+
+- name: Modify an identity pool using iSCSI and FC settings
+ dellemc.openmanage.ome_identity_pool:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ ca_path: "/path/to/ca_cert.pem"
+ pool_name: "pool_new"
+ new_pool_name: "pool_new2"
+ pool_description: "modifying identity pool with iscsi and fc settings"
+ iscsi_settings:
+ identity_count: 99
+ initiator_config:
+ iqn_prefix: "iqn1.myprefix2."
+ initiator_ip_pool_settings:
+ gateway: "192.168.4.5"
+ fc_settings:
+ starting_address: "10:10:10:10:10:10"
+ identity_count: 98
+
+- name: Delete an identity pool
+ dellemc.openmanage.ome_identity_pool:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "absent"
+ pool_name: "pool2"
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall status of the identity pool operation.
+ returned: always
+ sample: "Successfully created an identity pool."
+pool_status:
+ type: dict
+ description: Details of the user operation, when I(state) is C(present).
+ returned: success
+ sample: {
+ "Id":29,
+ "IsSuccessful":True,
+ "Issues":[]
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [{
+ "Message": "Unable to process the request because an error occurred:
+ Ethernet-MAC Range overlap found (in this Identity Pool or in a different one) .",
+ "MessageArgs": [Ethernet-MAC Range overlap found (in this Identity Pool or in a different one)"],
+ "MessageId": "CGEN6001",
+ "RelatedProperties": [],
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator.",
+ "Severity": "Critical"
+ }],
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information."
+ }}
+'''
+
+import re
+import json
+import codecs
+import binascii
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+
+IDENTITY_URI = "IdentityPoolService/IdentityPools"
+CHANGES_FOUND = "Changes found to be applied."
+NO_CHANGES_FOUND = "No changes found to be applied."
+
+
def get_identity_pool_id_by_name(pool_name, rest_obj):
    """Return (pool_id, pool_attributes) for the named identity pool, or
    (0, None) when no pool with that name exists."""
    for pool in rest_obj.get_all_report_details(IDENTITY_URI)["report_list"]:
        if pool["Name"] == pool_name:
            return pool["Id"], pool
    return 0, None
+
+
def mac_validation(mac_input):
    """Validate a MAC-style address (case-insensitive).

    Accepted forms: six octet pairs uniformly separated by ':' or '-'
    (the backreference enforces one consistent separator), or three
    4-hex-digit groups separated by '.'.  Returns the re.Match object
    (truthy) on success, otherwise None.
    """
    pair_form = "[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$"
    dotted_form = "([0-9a-f]{4}([.])[0-9a-f]{4}([.])[0-9a-f]{4})$"
    return re.match(pair_form + "|" + dotted_form, mac_input.lower())
+
+
+def mac_to_base64_conversion(mac_address, module):
+ try:
+ if mac_address:
+ allowed_mac_separators = [':', '-', '.']
+ for sep in allowed_mac_separators:
+ if sep in mac_address:
+ b64_mac_address = codecs.encode(codecs.decode(
+ mac_address.replace(sep, ''), 'hex'), 'base64')
+ address = codecs.decode(b64_mac_address, 'utf-8').rstrip()
+ return address
+ except binascii.Error:
+ module.fail_json(msg='Encoding of MAC address {0} to base64 '
+ 'failed'.format(mac_address))
+
+
def update_modify_setting(modify_payload, existing_payload, setting_type, sub_keys):
    """Backfill one setting group of the modify payload from the existing pool.

    OME treats a missing sub-setting as "clear it", so for each sub-setting
    key in *sub_keys* (e.g. "Mac", "Wwnn") either the whole existing
    sub-setting is carried over, or the individual keys the request omitted
    are copied in.  Mutates *modify_payload* in place.
    """
    for sub_key in sub_keys:
        # Whole sub-setting missing from the request: keep the existing one.
        if sub_key not in modify_payload[setting_type] and sub_key in existing_payload[setting_type]:
            modify_payload[setting_type][sub_key] = existing_payload[setting_type][sub_key]
        elif existing_payload[setting_type]:
            # Present in both: copy only the keys the request did not supply
            # (e.g. keep IdentityCount when only the MAC changed).
            if modify_payload[setting_type].get(sub_key) and existing_payload[setting_type].get(sub_key):
                modify_setting = modify_payload[setting_type][sub_key]
                existing_setting_payload = existing_payload[setting_type][sub_key]
                diff_item = list(set(existing_setting_payload) - set(modify_setting))
                for key in diff_item:
                    modify_payload[setting_type][sub_key][key] = existing_setting_payload[key]
+
+
def get_updated_modify_payload(modify_payload, existing_payload):
    """Merge the existing pool's settings into the modify payload.

    OME nulls out any setting omitted from an update request, so every
    top-level key (and, for the known setting groups, every sub-setting key)
    the user did not supply is copied over from the current pool.

    :param modify_payload: payload built from the module options (mutated).
    :param existing_payload: pool dict as returned by OME (mutated: read-only
        metadata keys are stripped).
    :return: the merged payload with all None values removed.
    """
    # Read-only/metadata keys must not be echoed back in an update request.
    remove_unwanted_key_list = ['@odata.type', '@odata.id', 'CreatedBy', 'CreationTime', 'LastUpdatedBy',
                                'LastUpdateTime', 'UsageCounts', 'UsageIdentitySets@odata.navigationLink']
    for key in remove_unwanted_key_list:
        existing_payload.pop(key, None)
    for key, val in existing_payload.items():
        if key not in modify_payload:
            modify_payload[key] = val
        elif existing_payload.get(key):
            # Backfill sub-settings the request left out.  NOTE(review): the
            # previous condition `a and b or c` matched FcoeSettings even when
            # the existing value was empty (operator precedence), which could
            # hit a `'Mac' in None` TypeError; a non-empty existing setting is
            # now required in all cases.
            if key in ("EthernetSettings", "FcoeSettings"):
                update_modify_setting(modify_payload, existing_payload, key, ["Mac"])
            elif key == "FcSettings":
                update_modify_setting(modify_payload, existing_payload, key, ["Wwnn", "Wwpn"])
            elif key == "IscsiSettings":
                update_modify_setting(modify_payload, existing_payload, key,
                                      ["Mac", "InitiatorConfig", "InitiatorIpPoolSettings"])
    modify_payload = dict([(k, v) for k, v in modify_payload.items() if v is not None])
    return modify_payload
+
+
def update_mac_settings(payload, settings_params, setting_type, module):
    """Add the "Mac" sub-setting for Ethernet/FCoE/iSCSI to *payload*:
    validates the starting MAC address, converts it to the base64 form OME
    expects and carries the identity count along.  Mutates *payload*."""
    starting_mac = settings_params.get("starting_mac_address")
    encoded_mac = None
    if starting_mac:
        if mac_validation(starting_mac):
            encoded_mac = mac_to_base64_conversion(starting_mac, module)
        else:
            module.fail_json(msg="Please provide the valid MAC address format for {0} settings."
                             .format(setting_type.split('Settings')[0]))
    mac_setting = {"StartingMacAddress": encoded_mac,
                   "IdentityCount": settings_params.get("identity_count")}
    mac_setting = {k: v for k, v in mac_setting.items() if v is not None}
    if any(mac_setting):
        payload.update({setting_type: {"Mac": mac_setting}})
+
+
def update_iscsi_specific_settings(payload, settings_params, setting_type):
    """Merge the iSCSI initiator and initiator IP pool options into *payload*
    under *setting_type*, preserving any Mac settings already present."""
    iscsi_settings = {}
    initiator = settings_params.get("initiator_config")
    if initiator and initiator.get("iqn_prefix"):
        iscsi_settings["InitiatorConfig"] = {"IqnPrefix": initiator.get("iqn_prefix")}
    ip_pool = settings_params.get("initiator_ip_pool_settings")
    if ip_pool:
        pool_payload = {"IpRange": ip_pool.get("ip_range"),
                        "SubnetMask": ip_pool.get("subnet_mask"),
                        "Gateway": ip_pool.get("gateway"),
                        "PrimaryDnsServer": ip_pool.get("primary_dns_server"),
                        "SecondaryDnsServer": ip_pool.get("secondary_dns_server")}
        pool_payload = {k: v for k, v in pool_payload.items() if v is not None}
        iscsi_settings["InitiatorIpPoolSettings"] = pool_payload
    if any(iscsi_settings):
        if "IscsiSettings" in payload:
            # keep the MAC settings that update_mac_settings already stored
            iscsi_settings.update(payload[setting_type])
        iscsi_settings = {key: val for key, val in iscsi_settings.items() if any(val)}
        payload.update({setting_type: iscsi_settings})
+
+
def get_wwn_address_prefix(starting_address):
    """Derive the WWNN/WWPN prefixes (0x2000/0x2001) for *starting_address*,
    reusing the address's own delimiter and group width.

    Returns a (wwnn_prefix, wwpn_prefix) tuple.
    """
    delimiter = None
    for candidate in (".", ":", "-"):
        if candidate in starting_address:
            delimiter = candidate
            break
    first_group = starting_address.split(delimiter)[0]
    if len(first_group) == 4:
        # dotted quad-style groups: the prefix is one 4-digit group
        return "2000{0}".format(delimiter), "2001{0}".format(delimiter)
    # octet pairs: the prefix spans two groups ("20" and "00"/"01")
    return "20{0}00{0}".format(delimiter), "20{0}01{0}".format(delimiter)
+
+
def update_fc_settings(payload, settings_params, setting_type, module):
    """Build the Fibre Channel ("FcSettings") payload.

    Derives the WWNN/WWPN starting addresses by prefixing the user-supplied
    MAC with 0x2000/0x2001 and copies the identity count into both entries.
    Mutates *payload*; fails the module on an invalid starting address.
    """
    starting_address = settings_params.get("starting_address")
    identity_count = settings_params.get("identity_count")
    wwnn = {}
    wwpn = {}
    if starting_address:
        if not mac_validation(starting_address):
            module.fail_json(msg="Please provide the valid starting address format for FC settings.")
        wwnn_prefix, wwpn_prefix = get_wwn_address_prefix(starting_address)
        wwnn["StartingAddress"] = mac_to_base64_conversion(wwnn_prefix + starting_address, module)
        wwpn["StartingAddress"] = mac_to_base64_conversion(wwpn_prefix + starting_address, module)
    if identity_count is not None:
        wwnn["IdentityCount"] = identity_count
        wwpn["IdentityCount"] = identity_count
    fc_settings = {key: val for key, val in {"Wwnn": wwnn, "Wwpn": wwpn}.items() if any(val)}
    if any(fc_settings):
        payload.update({setting_type: fc_settings})
+
+
def get_payload(module, pool_id=None):
    """Assemble the identity-pool payload for create (pool_id None) or
    modify (pool_id set, honouring new_pool_name).  None values are dropped
    so absent options are not sent to OME."""
    params = module.params
    setting_payload = {
        "Description": params.get("pool_description"),
        "Name": params["pool_name"],
    }
    if params.get("fcoe_settings"):
        update_mac_settings(setting_payload, params["fcoe_settings"], "FcoeSettings", module)
    if params.get("ethernet_settings"):
        update_mac_settings(setting_payload, params["ethernet_settings"], "EthernetSettings", module)
    if params.get("iscsi_settings"):
        update_mac_settings(setting_payload, params["iscsi_settings"], "IscsiSettings", module)
        update_iscsi_specific_settings(setting_payload, params["iscsi_settings"], "IscsiSettings")
    if params.get("fc_settings"):
        update_fc_settings(setting_payload, params["fc_settings"], "FcSettings", module)
    if pool_id:
        new_name = params.get("new_pool_name")
        if new_name is not None:
            setting_payload["Name"] = new_name
        setting_payload["Id"] = pool_id
    return {k: v for k, v in setting_payload.items() if v is not None}
+
+
def compare_nested_dict(modify_setting_payload, existing_setting_payload):
    """Recursively check whether every requested setting already exists.

    Returns True when each key in modify_setting_payload is present in
    existing_setting_payload with an equal value (descending into nested
    dicts); False as soon as any key is missing, None, or different. Used to
    decide idempotency for modify operations.
    """
    for key, requested in modify_setting_payload.items():
        if existing_setting_payload is None:
            return False
        current = existing_setting_payload.get(key)
        if current is None:
            return False
        if isinstance(requested, dict):
            if not compare_nested_dict(requested, current):
                return False
        elif requested != current:
            return False
    return True
+
+
def validate_modify_create_payload(setting_payload, module, action):
    """Ensure each identity-pool section of the payload is complete.

    Every section needs both a starting identity and an identity count; the
    iSCSI section additionally needs ip range and subnet mask whenever the
    initiator IP pool settings are supplied. Calls module.fail_json (which
    raises) on the first incomplete section; returns None when valid.

    :param setting_payload: payload assembled for the create/modify request.
    :param module: AnsibleModule instance, used to report validation failure.
    :param action: "create" or "modify", interpolated into failure messages.
    """
    for key, val in setting_payload.items():
        if key in ["EthernetSettings", "FcoeSettings"] and val:
            sub_config = val.get("Mac")
            if sub_config is None or not all([sub_config.get("IdentityCount"), sub_config.get("StartingMacAddress")]):
                module.fail_json(msg="Both starting MAC address and identity count is required to {0} an"
                                     " identity pool using {1} settings.".format(action, ''.join(key.split('Settings'))))
        elif key == "FcSettings" and val:
            sub_config = val.get("Wwnn")
            if sub_config is None or not all([sub_config.get("IdentityCount"), sub_config.get("StartingAddress")]):
                module.fail_json(msg="Both starting MAC address and identity count is required to"
                                     " {0} an identity pool using Fc settings.".format(action))
        elif key == "IscsiSettings" and val:
            sub_config1 = val.get("Mac")
            sub_config2 = val.get("InitiatorIpPoolSettings")
            if sub_config1 is None or not all([sub_config1.get("IdentityCount"), sub_config1.get("StartingMacAddress")]):
                module.fail_json(msg="Both starting MAC address and identity count is required to {0} an"
                                     " identity pool using {1} settings.".format(action, ''.join(key.split('Settings'))))
            elif sub_config2:
                if not all([sub_config2.get("IpRange"), sub_config2.get("SubnetMask")]):
                    # Fixed grammar in the user-facing message: "in required" -> "is required".
                    module.fail_json(msg="Both ip range and subnet mask is required to {0} an identity"
                                         " pool using iSCSI settings.".format(action))
+
+
def pool_create_modify(module, rest_obj):
    """Create a new identity pool, or modify the pool when one with the same name exists.

    Looks up the pool by name; a hit switches the request from POST (create)
    to PUT (modify) on the pool-specific URI. Exits with NO_CHANGES_FOUND when
    the requested settings already match the existing pool, and exits before
    issuing the request when running in check mode.

    :param module: AnsibleModule instance.
    :param rest_obj: OME REST session object.
    :return: dict with the success message and the API response payload.
    """
    pool_name = module.params["pool_name"]
    pool_id, existing_payload = get_identity_pool_id_by_name(pool_name, rest_obj)
    method = "POST"
    uri = IDENTITY_URI
    action = "create"
    setting_payload = get_payload(module, pool_id)
    if pool_id:
        action = "modify"
        method = "PUT"
        uri = uri + "({0})".format(pool_id)
        if compare_nested_dict(setting_payload, existing_payload):
            # Idempotent no-op: every requested value is already in effect.
            module.exit_json(msg=NO_CHANGES_FOUND)
        else:
            # Merge unchanged existing values into the modify payload.
            setting_payload = get_updated_modify_payload(setting_payload, existing_payload)
    validate_modify_create_payload(setting_payload, module, action)
    if module.check_mode:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    resp = rest_obj.invoke_request(method, uri, data=setting_payload)
    msg = get_success_message(action, resp.json_data)
    return msg
+
+
def pool_delete(module, rest_obj):
    """Delete the identity pool identified by pool_name.

    Exits early (without failure) when the pool does not exist, and before
    issuing the DELETE when running in check mode. The previous implementation
    wrapped the whole body in a try/except that only re-raised the caught
    exception; that no-op wrapper has been removed.

    :param module: AnsibleModule instance.
    :param rest_obj: OME REST session object.
    :return: dict with the success message.
    """
    pool_name = module.params["pool_name"]
    pool_id, existing_payload = get_identity_pool_id_by_name(pool_name, rest_obj)
    if not pool_id:
        message = "The identity pool '{0}' is not present in the system.".format(pool_name)
        module.exit_json(msg=message)
    if module.check_mode:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    uri = IDENTITY_URI + "({0})".format(pool_id)
    rest_obj.invoke_request("DELETE", uri)
    return {"msg": "Successfully deleted the identity pool."}
+
+
def get_success_message(action, resp_data):
    """Map the performed action ("create" or "modify") to its success message.

    :param action: the operation that was performed.
    :param resp_data: JSON payload returned by the API.
    :return: dict with "msg" (human-readable message) and "result" (resp_data).
    """
    success_text = {
        "create": "Successfully created an identity pool.",
        "modify": "Successfully modified the identity pool.",
    }[action]
    return {"msg": success_text, "result": resp_data}
+
+
def main():
    """Module entry point: build the argument spec and run the requested state."""
    # Suboptions shared by ethernet_settings and fcoe_settings.
    settings_options = {"starting_mac_address": {"type": 'str'},
                        "identity_count": {"type": 'int'}}
    # iSCSI adds initiator and initiator-IP-pool configuration on top of the
    # common MAC options.
    iscsi_specific_settings = {"starting_mac_address": {"type": 'str'},
                               "identity_count": {"type": 'int'},
                               "initiator_config": {"options": {"iqn_prefix": {"type": 'str'}}, "type": "dict"},
                               "initiator_ip_pool_settings": {"options": {"ip_range": {"type": 'str'},
                                                                          "subnet_mask": {"type": 'str'},
                                                                          "gateway": {"type": 'str'},
                                                                          "primary_dns_server": {"type": 'str'},
                                                                          "secondary_dns_server": {"type": 'str'}},
                                                              "type": "dict"}}
    # FC pools use a WWN starting address instead of a MAC address.
    fc_settings = {"starting_address": {"type": "str"}, "identity_count": {"type": "int"}}

    specs = {
        "state": {"type": "str", "required": False, "default": "present", "choices": ['present', 'absent']},
        "pool_name": {"required": True, "type": "str"},
        "new_pool_name": {"required": False, "type": "str"},
        "pool_description": {"required": False, "type": "str"},
        "ethernet_settings": {"required": False, "type": "dict",
                              "options": settings_options},
        "fcoe_settings": {"required": False, "type": "dict", "options": settings_options},
        "iscsi_settings": {"required": False, "type": "dict",
                           "options": iscsi_specific_settings},
        "fc_settings": {"required": False, "type": "dict", "options": fc_settings},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            state = module.params["state"]
            if state == "present":
                # Creates or modifies depending on whether the pool exists.
                message = pool_create_modify(module, rest_obj)
                module.exit_json(msg=message["msg"], pool_status=message["result"], changed=True)
            else:
                message = pool_delete(module, rest_obj)
                module.exit_json(msg=message["msg"], changed=True)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Unreachable host is reported as unreachable rather than failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, OSError) as err:
        module.fail_json(msg=str(err))
    except Exception as err:
        module.fail_json(msg=str(err))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py
new file mode 100644
index 000000000..26b0d545e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
# Ansible documentation block; fixed the typo "OpenMange" -> "OpenManage".
DOCUMENTATION = r'''
---
module: ome_job_info
short_description: Get job details for a given job ID or an entire job queue on OpenManage Enterprise
version_added: "2.0.0"
description: This module retrieves job details for a given job ID or an entire job queue on OpenManage Enterprise.
extends_documentation_fragment:
  - dellemc.openmanage.ome_auth_options
options:
  job_id:
    description: Unique ID of the job.
    type: int
  system_query_options:
    description: Options for pagination of the output.
    type: dict
    suboptions:
      top:
        description: Number of records to return. Default value is 100.
        type: int
      skip:
        description: Number of records to skip. Default value is 0.
        type: int
      filter:
        description: Filter records by the values supported.
        type: str
requirements:
  - "python >= 3.8.6"
author: "Jagadeesh N V(@jagadeeshnv)"
notes:
  - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
  - This module supports C(check_mode).
'''
+
+EXAMPLES = r'''
+---
+- name: Get all jobs details
+ dellemc.openmanage.ome_job_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Get job details for id
+ dellemc.openmanage.ome_job_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: 12345
+
+- name: Get filtered job details
+ dellemc.openmanage.ome_job_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ system_query_options:
+ top: 2
+ skip: 1
+ filter: "JobType/Id eq 8"
+
+'''
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of the job facts operation.
+ returned: always
+ type: str
+ sample: "Successfully fetched the job info"
+job_info:
+ description: Details of the OpenManage Enterprise jobs.
+ returned: success
+ type: dict
+ sample: {
+ "value": [
+ {
+ "Builtin": false,
+ "CreatedBy": "system",
+ "Editable": true,
+ "EndTime": null,
+ "Id": 12345,
+ "JobDescription": "Refresh Inventory for Device",
+ "JobName": "Refresh Inventory for Device",
+ "JobStatus": {
+ "Id": 2080,
+ "Name": "New"
+ },
+ "JobType": {
+ "Id": 8,
+ "Internal": false,
+ "Name": "Inventory_Task"
+ },
+ "LastRun": "2000-01-29 10:51:34.776",
+ "LastRunStatus": {
+ "Id": 2060,
+ "Name": "Completed"
+ },
+ "NextRun": null,
+ "Params": [],
+ "Schedule": "",
+ "StartTime": null,
+ "State": "Enabled",
+ "Targets": [
+ {
+ "Data": "''",
+ "Id": 123123,
+ "JobId": 12345,
+ "TargetType": {
+ "Id": 1000,
+ "Name": "DEVICE"
+ }
+ }
+ ],
+ "UpdatedBy": null,
+ "Visible": true
+ }
+ ]}
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+JOBS_URI = "JobService/Jobs"
+
+
+def _get_query_parameters(module_params):
+ """Builds query parameter
+ :returns: dictionary, which builds the query format
+ eg : {"$filter": "JobType/Id eq 8"}
+ """
+ system_query_options_param = module_params.get("system_query_options")
+ query_parameter = {}
+ if system_query_options_param:
+ query_parameter = dict([("$" + k, v) for k, v in system_query_options_param.items() if v is not None])
+ return query_parameter
+
+
def main():
    """Module entry point: fetch one job, a filtered job list, or the full queue."""
    specs = {
        "job_id": {"required": False, "type": 'int'},
        "system_query_options": {"required": False, "type": 'dict', "options": {
            "top": {"type": 'int', "required": False},
            "skip": {"type": 'int', "required": False},
            "filter": {"type": 'str', "required": False},
        }},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        supports_check_mode=True
    )

    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            # Collects HTTP status codes; a 200 marks the fetch as successful.
            resp_status = []
            if module.params.get("job_id") is not None:
                # Fetch specific job
                job_id = module.params.get("job_id")
                jpath = "{0}({1})".format(JOBS_URI, job_id)
                resp = rest_obj.invoke_request('GET', jpath)
                job_facts = resp.json_data
                resp_status.append(resp.status_code)
            else:
                # query applicable only for all jobs list fetching
                query_param = _get_query_parameters(module.params)
                if query_param:
                    resp = rest_obj.invoke_request('GET', JOBS_URI, query_param=query_param)
                    job_facts = resp.json_data
                    resp_status.append(resp.status_code)
                else:
                    # Fetch all jobs, filter and pagination options
                    job_report = rest_obj.get_all_report_details(JOBS_URI)
                    job_facts = {"@odata.context": job_report["resp_obj"].json_data["@odata.context"],
                                 "@odata.count": len(job_report["report_list"]),
                                 "value": job_report["report_list"]}
                    if job_facts["@odata.count"] > 0:
                        resp_status.append(200)
    except HTTPError as httperr:
        module.fail_json(msg=str(httperr), job_info=json.load(httperr))
    except URLError as err:
        # Unreachable host is reported as unreachable rather than failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (SSLValidationError, ConnectionError, TypeError, ValueError, SSLError, OSError) as err:
        module.fail_json(msg=str(err))
    # NOTE(review): when the unfiltered job queue is empty, resp_status stays
    # empty and the module reports failure even though the fetch succeeded —
    # confirm this is intended.
    if 200 in resp_status:
        module.exit_json(msg="Successfully fetched the job info", job_info=job_facts)
    else:
        module.fail_json(msg="Failed to fetch the job info")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py
new file mode 100644
index 000000000..08e307c78
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py
@@ -0,0 +1,283 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_network_port_breakout
+short_description: This module allows to automate the port portioning or port breakout to logical sub ports
+version_added: "2.1.0"
+description:
+ - This module allows to automate breaking out of IOMs in fabric mode into logical sub ports.
+ - The port breakout operation is only supported in OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.omem_auth_options
+options:
+ target_port:
+ required: True
+ description: "The ID of the port in the switch to breakout. Enter the port ID in the format: service tag:port.
+ For example, 2HB7NX2:ethernet1/1/13."
+ type: str
+ breakout_type:
+ required: True
+ description:
+ - The preferred breakout type. For example, 4X10GE.
+ - To revoke the default breakout configuration, enter 'HardwareDefault'.
+ type: str
+requirements:
+ - "python >= 3.8.6"
+author: "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Port breakout configuration
+ dellemc.openmanage.ome_network_port_breakout:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ target_port: "2HB7NX2:phy-port1/1/11"
+ breakout_type: "1X40GE"
+
+- name: Revoke the default breakout configuration
+ dellemc.openmanage.ome_network_port_breakout:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ target_port: "2HB7NX2:phy-port1/1/11"
+ breakout_type: "HardwareDefault"
+'''
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of the port configuration.
+ returned: always
+ type: str
+ sample: Port breakout configuration job submitted successfully.
+breakout_status:
+ description: Details of the OpenManage Enterprise jobs.
+ returned: success
+ type: dict
+ sample: {
+ "Builtin": false,
+ "CreatedBy": "root",
+ "Editable": true,
+ "EndTime": null,
+ "Id": 11111,
+ "JobDescription": "",
+ "JobName": "Breakout Port",
+ "JobStatus": {"Id": 1112, "Name": "New"},
+ "JobType": {"Id": 3, "Internal": false, "Name": "DeviceAction_Task"},
+ "LastRun": null,
+ "LastRunStatus": {"Id": 1113, "Name": "NotRun"},
+ "NextRun": null,
+ "Params": [
+ {"JobId": 11111, "Key": "operationName", "Value": "CONFIGURE_PORT_BREAK_OUT"},
+ {"JobId": 11111, "Key": "interfaceId", "Value": "2HB7NX2:phy-port1/1/11"},
+ {"JobId": 11111, "Key": "breakoutType", "Value": "1X40GE"}],
+ "Schedule": "startnow",
+ "StartTime": null,
+ "State": "Enabled",
+ "Targets": [
+ {"Data": "", "Id": 11112, "JobId": 34206, "TargetType": { "Id": 1000, "Name": "DEVICE"}}
+ ],
+ "UpdatedBy": null,
+ "UserGenerated": true,
+ "Visible": true
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+import re
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+DEVICE_URI = "DeviceService/Devices"
+PORT_INFO_URI = "DeviceService/Devices({0})/InventoryDetails('portInformation')"
+JOB_URI = "JobService/Jobs"
+
+
def get_device_id(module, rest_obj):
    """
    Resolve the device id for the service tag embedded in target_port.

    :param module: ansible module arguments.
    :param rest_obj: rest object for making requests.
    :return: device id
    """
    target_port = module.params["target_port"]
    # target_port must look like "<service tag>:<port>", e.g. 2HB7NX2:ethernet1/1/13.
    if re.search("^[a-z0-9A-Z]+[:][a-z0-9A-Z/-]+$", target_port) is None:
        module.fail_json(msg="Invalid target port {0}.".format(target_port))
    service_tag = target_port.split(":")
    filter_query = {"$filter": "DeviceServiceTag eq '{0}'".format(service_tag[0])}
    response = rest_obj.invoke_request("GET", DEVICE_URI, query_param=filter_query)
    if response.status_code == 200 and response.json_data.get("value"):
        return response.json_data.get("value")[0]["Id"]
    module.fail_json(msg="Unable to retrieve the device information because"
                         " the device with the entered service tag {0} is not present.".format(service_tag[0]))
+
+
def get_port_information(module, rest_obj, device_id):
    """
    This function returns the existing breakout configuration details.
    :param module: ansible module arguments.
    :param rest_obj: rest object for making requests.
    :param device_id: device id
    :return: str, {}, str
    """
    response = rest_obj.invoke_request("GET", PORT_INFO_URI.format(device_id))
    breakout_config, breakout_capability, target_port = None, None, module.params["target_port"]
    for each in response.json_data.get("InventoryInfo"):
        # Accept only the port that matches the requested id AND supports
        # breakout (Configuration != "NoBreakout").
        if not each["Configuration"] == "NoBreakout" and each["Id"] == target_port:
            breakout_capability = each["PortBreakoutCapabilities"]
            breakout_config = each["Configuration"]
            interface_id = each["Id"]
            break
    else:
        # for/else: runs only when the loop finished without `break`, i.e. no
        # matching breakout-capable port was found; fail_json raises, so
        # interface_id is always bound on the return path below.
        module.fail_json(msg="{0} does not support port breakout"
                             " or invalid port number entered.".format(target_port))
    return breakout_config, breakout_capability, interface_id
+
+
def get_breakout_payload(device_id, breakout_type, interface_id):
    """
    Build the job payload that applies a port-breakout configuration.
    :param device_id: device id
    :param breakout_type: requested breakout type
    :param interface_id: port number with service tag
    :return: json
    """
    job_params = [
        {"Key": "breakoutType", "Value": breakout_type},
        {"Key": "interfaceId", "Value": interface_id},
        {"Key": "operationName", "Value": "CONFIGURE_PORT_BREAK_OUT"},
    ]
    job_target = {"JobId": 0, "Id": device_id, "Data": "",
                  "TargetType": {"Id": 4000, "Name": "DEVICE"}}
    return {
        "Id": 0,
        "JobName": "Breakout Port",
        "JobDescription": "",
        "Schedule": "startnow",
        "State": "Enabled",
        "JobType": {"Id": 3, "Name": "DeviceAction_Task"},
        "Params": job_params,
        "Targets": [job_target],
    }
+
+
def check_mode(module, changes=False):
    """
    Exit with an appropriate message when the module runs in check mode.
    :param module: ansible module arguments
    :param changes: whether a change would be applied.
    :return: None
    """
    if not module.check_mode:
        return
    if changes:
        module.exit_json(msg="Changes found to commit!", changed=True)
    module.exit_json(msg="No changes found to commit!", changed=False)
+
+
def set_breakout(module, rest_obj, breakout_config, breakout_capability, interface_id, device_id):
    """
    Configuration the breakout feature for given option.
    :param module: ansible module arguments.
    :param rest_obj: rest object for making requests.
    :param breakout_config: Existing breakout configuration.
    :param breakout_capability: Available breakout configuration.
    :param interface_id: port number with service tag
    :param device_id: device id
    :return: rest object
    """
    breakout_type, response = module.params["breakout_type"], {}
    payload = get_breakout_payload(device_id, breakout_type, interface_id)
    if breakout_config == "HardwareDefault" and not breakout_type == "HardwareDefault":
        # Port is at hardware default: apply the requested type only if the
        # port advertises it among its breakout capabilities.
        for config in breakout_capability:
            if breakout_type == config["Type"]:
                check_mode(module, changes=True)
                response = rest_obj.invoke_request("POST", JOB_URI, data=payload)
                break
        else:
            # for/else: no capability matched the requested type.
            supported_type = ", ".join(i["Type"] for i in breakout_capability)
            module.fail_json(msg="Invalid breakout type: {0}, supported values are {1}.".format(breakout_type,
                                                                                               supported_type))
    elif not breakout_config == "HardwareDefault" and breakout_type == "HardwareDefault":
        # Revert a configured port back to the hardware default.
        check_mode(module, changes=True)
        response = rest_obj.invoke_request("POST", JOB_URI, data=payload)
    elif breakout_config == breakout_type:
        # Idempotent: requested configuration is already applied.
        check_mode(module, changes=False)
        module.exit_json(msg="The port is already configured with the selected breakout configuration.")
    else:
        # Direct breakout-to-breakout changes are rejected by the device.
        module.fail_json(msg="Device does not support changing a port breakout"
                             " configuration to different breakout type. Configure the port to"
                             " HardwareDefault and retry the operation.")
    return response
+
+
def main():
    """Module entry point: resolve the device and submit the breakout job."""
    specs = {
        "target_port": {"required": True, "type": 'str'},
        "breakout_type": {"required": True, "type": 'str'},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            device_id = get_device_id(module, rest_obj)
            breakout_config, breakout_capability, interface_id = get_port_information(module, rest_obj, device_id)
            breakout_status = set_breakout(module, rest_obj, breakout_config,
                                           breakout_capability, interface_id, device_id)
            if breakout_status:
                module.exit_json(msg="Port breakout configuration job submitted successfully.",
                                 breakout_status=breakout_status.json_data, changed=True)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Unreachable host is reported as unreachable rather than failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (SSLValidationError, ConnectionError, TypeError, ValueError, IndexError, SSLError, OSError) as err:
        module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan.py
new file mode 100644
index 000000000..90ac7a837
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.3.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_network_vlan
+short_description: Create, modify & delete a VLAN
+version_added: "2.1.0"
+description:
+ - This module allows to,
+ - Create a VLAN on OpenManage Enterprise.
+ - Modify or delete an existing VLAN on OpenManage Enterprise.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ state:
+ type: str
+ description:
+ - C(present) creates a new VLAN or modifies an existing VLAN.
+ - C(absent) deletes an existing VLAN.
+ - I(WARNING) Deleting a VLAN can impact the network infrastructure.
+ choices: [present, absent]
+ default: present
+ name:
+ required: true
+ type: str
+ description: Provide the I(name) of the VLAN to be created, deleted or modified.
+ new_name:
+ type: str
+ description: Provide the I(name) of the VLAN to be modified.
+ description:
+ type: str
+ description: Short description of the VLAN to be created or modified.
+ vlan_minimum:
+ type: int
+ description:
+ - The minimum VLAN value of the range.
+ vlan_maximum:
+ type: int
+ description:
+ - The maximum VLAN value of the range.
+ - A single value VLAN is created if the vlan_maximum and vlan_minmum values are the same.
+ type:
+ type: str
+ description:
+ - Types of supported VLAN networks.
+ - "For the description of each network type,
+ use API U(https://I(hostname)/api/NetworkConfigurationService/NetworkTypes)."
+ choices: ['General Purpose (Bronze)', 'General Purpose (Silver)', 'General Purpose (Gold)',
+ 'General Purpose (Platinum)', 'Cluster Interconnect', 'Hypervisor Management',
+ 'Storage - iSCSI', 'Storage - FCoE', 'Storage - Data Replication',
+ 'VM Migration', 'VMWare FT Logging']
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Jagadeesh N V(@jagadeeshnv)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Create a VLAN range
+ dellemc.openmanage.ome_network_vlan:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: present
+ name: "vlan1"
+ description: "VLAN desc"
+ type: "General Purpose (Bronze)"
+ vlan_minimum: 35
+ vlan_maximum: 40
+ tags: create_vlan_range
+
+- name: Create a VLAN with a single value
+ dellemc.openmanage.ome_network_vlan:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: present
+ name: "vlan2"
+ description: "VLAN desc"
+ type: "General Purpose (Bronze)"
+ vlan_minimum: 127
+ vlan_maximum: 127
+ tags: create_vlan_single
+
+- name: Modify a VLAN
+ dellemc.openmanage.ome_network_vlan:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: present
+ name: "vlan1"
+ new_name: "vlan_gold1"
+ description: "new description"
+ type: "General Purpose (Gold)"
+ vlan_minimum: 45
+ vlan_maximum: 50
+ tags: modify_vlan
+
+- name: Delete a VLAN
+ dellemc.openmanage.ome_network_vlan:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "absent"
+ name: "vlan1"
+ tags: delete_vlan
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall status of the VLAN operation.
+ returned: always
+ sample: "Successfully created the VLAN."
+vlan_status:
+ type: dict
+ description: Details of the VLAN that is either created or modified.
+ returned: when I(state=present)
+ sample: {
+ "@odata.context": "/api/$metadata#NetworkConfigurationService.Network",
+ "@odata.type": "#NetworkConfigurationService.Network",
+ "@odata.id": "/api/NetworkConfigurationService/Networks(1234)",
+ "Id": 1234,
+ "Name": "vlan1",
+ "Description": "VLAN description",
+ "VlanMaximum": 130,
+ "VlanMinimum": 140,
+ "Type": 1,
+ "CreatedBy": "admin",
+ "CreationTime": "2020-01-01 05:54:36.113",
+ "UpdatedBy": null,
+ "UpdatedTime": "2020-01-01 05:54:36.113",
+ "InternalRefNWUUId": "6d6effcc-eca4-44bd-be07-1234ab5cd67e"
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CTEM1043",
+ "RelatedProperties": [],
+ "Message": "Unable to create or update the network because the entered VLAN minimum 0
+ is not within a valid range ( 1 - 4000 or 4021 - 4094 ).",
+ "MessageArgs": [
+ "0",
+ "1",
+ "4000",
+ "4021",
+ "4094"
+ ],
+ "Severity": "Warning",
+ "Resolution": "Enter a valid VLAN minimum as identified in the message and retry the operation."
+ }
+ ]
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+
+VLAN_CONFIG = "NetworkConfigurationService/Networks"
+VLAN_ID_CONFIG = "NetworkConfigurationService/Networks({Id})"
+VLAN_TYPES = "NetworkConfigurationService/NetworkTypes"
+VLAN_RANGE_OVERLAP = "Unable to create or update the VLAN because the entered range" \
+ " overlaps with {vlan_name} with the range {vlan_min}-{vlan_max}."
+VLAN_VALUE_MSG = "VLAN-minimum value is greater than VLAN-maximum value."
+CHECK_MODE_MSG = "Changes found to be applied."
+
+
def format_payload(src_dict):
    """Convert module-argument keys into their OME VLAN payload field names.

    Keys outside the known mapping are dropped. Returns None when src_dict is
    empty or None (matching the original implicit-None behavior).
    """
    key_map = {
        "name": "Name",
        "vlan_maximum": "VlanMaximum",
        "vlan_minimum": "VlanMinimum",
        "type": "Type"
    }
    if not src_dict:
        return None
    return {key_map[k]: v for k, v in src_dict.items() if k in key_map}
+
+
def get_item_id(rest_obj, name, uri):
    """Look up an item's Id by Name at the given collection URI.

    :param rest_obj: OME REST session object.
    :param name: item name to match exactly.
    :param uri: collection URI to query.
    :return: (item Id, full item list); Id is 0 when no item matches.
    """
    response = rest_obj.invoke_request('GET', uri)
    items = []
    if response.success and response.json_data.get('value'):
        items = response.json_data.get('value', [])
    matched = next((entry for entry in items if entry.get('Name', "") == name), None)
    if matched is not None:
        return matched.get('Id'), items
    return 0, items
+
+
def check_overlapping_vlan_range(payload, vlans):
    """Return the first existing VLAN whose range overlaps the requested one.

    :param payload: dict with the requested "VlanMinimum"/"VlanMaximum".
    :param vlans: list of existing VLAN dicts.
    :return: the overlapping VLAN dict, or None when there is no overlap.
    """
    for existing in vlans:
        # Two inclusive ranges overlap iff the larger minimum does not exceed
        # the smaller maximum. Comparing the bounds directly replaces the
        # previous implementation, which materialised a list of every VLAN id
        # in the intersection just to test whether it was empty.
        if max(existing.get('VlanMinimum', 0), payload["VlanMinimum"]) <= \
                min(existing.get('VlanMaximum', 0), payload["VlanMaximum"]):
            return existing
    return None
+
+
def create_vlan(module, rest_obj, vlans):
    """Create a new VLAN after validating its range, overlap and network type."""
    payload = format_payload(module.params)
    if not all(payload.values()):
        module.fail_json(msg="The vlan_minimum, vlan_maximum and type values are required for creating a VLAN.")
    if payload["VlanMinimum"] > payload["VlanMaximum"]:
        module.fail_json(msg=VLAN_VALUE_MSG)
    clashing = check_overlapping_vlan_range(payload, vlans)
    if clashing:
        module.fail_json(msg=VLAN_RANGE_OVERLAP.format(vlan_name=clashing["Name"],
                                                       vlan_min=clashing["VlanMinimum"],
                                                       vlan_max=clashing["VlanMaximum"]))
    if module.check_mode:
        module.exit_json(changed=True, msg=CHECK_MODE_MSG)
    description = module.params.get("description")
    if description:
        payload["Description"] = description
    # Resolve the human-readable network type to its numeric id.
    payload["Type"], types = get_item_id(rest_obj, module.params["type"], VLAN_TYPES)
    if not payload["Type"]:
        module.fail_json(msg="Network type '{0}' not found.".format(module.params["type"]))
    resp = rest_obj.invoke_request("POST", VLAN_CONFIG, data=payload)
    module.exit_json(msg="Successfully created the VLAN.", vlan_status=resp.json_data, changed=True)
+
+
def delete_vlan(module, rest_obj, vlan_id):
    """Delete the VLAN with the given id and exit the module.

    :param module: AnsibleModule instance.
    :param rest_obj: OME REST session object.
    :param vlan_id: id of the VLAN to delete.
    """
    if module.check_mode:
        module.exit_json(changed=True, msg=CHECK_MODE_MSG)
    # The DELETE response body is not needed; the previous code bound it to an
    # unused local.
    rest_obj.invoke_request("DELETE", VLAN_ID_CONFIG.format(Id=vlan_id))
    module.exit_json(msg="Successfully deleted the VLAN.", changed=True)
+
+
def modify_vlan(module, rest_obj, vlan_id, vlans):
    """Modify an existing VLAN; idempotent when nothing would change.

    Builds the PUT payload from user input, carries over current values for
    unspecified options, and exits without a request when the result equals
    the current configuration. The previous diff loop unpacked an unused
    variable and redundantly re-assigned unchanged payload entries; both have
    been removed without changing behavior.

    :param module: AnsibleModule instance.
    :param rest_obj: OME REST session object.
    :param vlan_id: id of the VLAN to modify.
    :param vlans: list of all existing VLANs, used for the overlap check.
    """
    payload = format_payload(module.params)
    payload["Description"] = module.params.get("description")
    if module.params.get("type"):
        payload["Type"], types = get_item_id(rest_obj, module.params["type"], VLAN_TYPES)
        if not payload["Type"]:
            module.fail_json(msg="Network type '{0}' not found.".format(module.params["type"]))
    if module.params.get("new_name"):
        payload["Name"] = module.params["new_name"]
    current_setting = {}
    for i in range(len(vlans)):
        if vlans[i]['Id'] == vlan_id:
            # Pop so the overlap check below ignores the VLAN being modified.
            current_setting = vlans.pop(i)
            break
    diff = 0
    for config, pval in payload.items():
        if pval is None:
            # Option not supplied; keep the current value so the PUT payload
            # remains complete.
            payload[config] = current_setting.get(config)
        elif current_setting.get(config) != pval:
            diff += 1
    if payload["VlanMinimum"] > payload["VlanMaximum"]:
        module.fail_json(msg=VLAN_VALUE_MSG)
    overlap = check_overlapping_vlan_range(payload, vlans)
    if overlap:
        module.fail_json(msg=VLAN_RANGE_OVERLAP.format(vlan_name=overlap["Name"], vlan_min=overlap["VlanMinimum"],
                                                       vlan_max=overlap["VlanMaximum"]))
    if diff == 0:  # Idempotency
        if module.check_mode:
            module.exit_json(msg="No changes found to be applied to the VLAN configuration.")
        module.exit_json(msg="No changes found to be applied as the entered values are the same as the"
                             " current configuration.", vlan_status=current_setting)
    if module.check_mode:
        module.exit_json(changed=True, msg=CHECK_MODE_MSG)
    payload["Id"] = vlan_id
    resp = rest_obj.invoke_request("PUT", VLAN_ID_CONFIG.format(Id=vlan_id), data=payload)
    module.exit_json(msg="Successfully updated the VLAN.", vlan_status=resp.json_data, changed=True)
+
+
def check_existing_vlan(module, rest_obj):
    """Return (vlan_id, all_vlans) for the named VLAN; id is 0 when absent."""
    requested_name = module.params["name"]
    return get_item_id(rest_obj, requested_name, VLAN_CONFIG + "?$top=9999")
+
+
def main():
    """Module entry point: create/modify a VLAN when present, delete when absent."""
    specs = {
        "state": {"required": False, "choices": ['present', 'absent'], "default": "present"},
        "name": {"required": True, "type": "str"},
        "new_name": {"required": False, "type": "str"},
        "description": {"required": False, "type": "str"},
        "vlan_minimum": {"required": False, "type": "int"},
        "vlan_maximum": {"required": False, "type": "int"},
        "type": {"required": False, "type": "str",
                 "choices": ['General Purpose (Bronze)', 'General Purpose (Silver)', 'General Purpose (Gold)',
                             'General Purpose (Platinum)', 'Cluster Interconnect', 'Hypervisor Management',
                             'Storage - iSCSI', 'Storage - FCoE', 'Storage - Data Replication', 'VM Migration',
                             'VMWare FT Logging']}
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        # state=present needs at least one configurable option supplied.
        required_if=[['state', 'present', ('new_name', 'description', 'vlan_minimum', 'vlan_maximum', 'type',), True]],
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            vlan_id, vlans = check_existing_vlan(module, rest_obj)
            if module.params["state"] == "present":
                if vlan_id:
                    # Existing VLAN: modify_vlan exits the module itself.
                    modify_vlan(module, rest_obj, vlan_id, vlans)
                create_vlan(module, rest_obj, vlans)
            else:
                if vlan_id:
                    delete_vlan(module, rest_obj, vlan_id)
                if module.check_mode:
                    module.exit_json(msg="No changes found to be applied to the VLAN configuration.")
                module.exit_json(msg="VLAN {0} does not exist.".format(module.params["name"]))
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Unreachable host is reported as unreachable rather than failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, TypeError, ConnectionError, SSLValidationError, SSLError, OSError) as err:
        module.fail_json(msg=str(err))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py
new file mode 100644
index 000000000..f1de512be
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_network_vlan_info
+short_description: Retrieves the information about networks VLAN(s) present in OpenManage Enterprise
+version_added: "2.1.0"
+description:
+ This module allows to retrieve the following.
+ - A list of all the network VLANs with their detailed information.
+ - Information about a specific network VLAN using VLAN I(id) or VLAN I(name).
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ id:
+ description:
+ - A unique identifier of the network VLAN available in the device.
+ - I(id) and I(name) are mutually exclusive.
+ type: int
+ name:
+ description:
+ - A unique name of the network VLAN available in the device.
+ - I(name) and I(id) are mutually exclusive.
+ type: str
+
+requirements:
+ - "python >= 3.8.6"
+author: "Deepak Joshi(@deepakjoshishri)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = """
+---
+- name: Retrieve information about all network VLANs(s) available in the device
+ dellemc.openmanage.ome_network_vlan_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Retrieve information about a network VLAN using the VLAN ID
+ dellemc.openmanage.ome_network_vlan_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ id: 12345
+
+- name: Retrieve information about a network VLAN using the VLAN name
+ dellemc.openmanage.ome_network_vlan_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Network VLAN - 1"
+"""
+
+RETURN = '''
+---
+msg:
+ type: str
+ description: Detailed information of the network VLAN(s).
+ returned: success
+ sample: {
+ "msg": "Successfully retrieved the network VLAN information.",
+ "network_vlan_info": [
+ {
+ "CreatedBy": "admin",
+ "CreationTime": "2020-09-02 18:48:42.129",
+ "Description": "Description of Logical Network - 1",
+ "Id": 20057,
+ "InternalRefNWUUId": "42b9903d-93f8-4184-adcf-0772e4492f71",
+ "Name": "Network VLAN - 1",
+ "Type": {
+ "Description": "This is the network for general purpose traffic. QOS Priority : Bronze.",
+ "Id": 1,
+ "Name": "General Purpose (Bronze)",
+ "NetworkTrafficType": "Ethernet",
+ "QosType": {
+ "Id": 4,
+ "Name": "Bronze"
+ },
+ "VendorCode": "GeneralPurpose"
+ },
+ "UpdatedBy": null,
+ "UpdatedTime": "2020-09-02 18:48:42.129",
+ "VlanMaximum": 111,
+ "VlanMinimum": 111
+ },
+ {
+ "CreatedBy": "admin",
+ "CreationTime": "2020-09-02 18:49:11.507",
+ "Description": "Description of Logical Network - 2",
+ "Id": 20058,
+ "InternalRefNWUUId": "e46ccb3f-ef57-4617-ac76-46c56594005c",
+ "Name": "Network VLAN - 2",
+ "Type": {
+ "Description": "This is the network for general purpose traffic. QOS Priority : Silver.",
+ "Id": 2,
+ "Name": "General Purpose (Silver)",
+ "NetworkTrafficType": "Ethernet",
+ "QosType": {
+ "Id": 3,
+ "Name": "Silver"
+ },
+ "VendorCode": "GeneralPurpose"
+ },
+ "UpdatedBy": null,
+ "UpdatedTime": "2020-09-02 18:49:11.507",
+ "VlanMaximum": 112,
+ "VlanMinimum": 112
+ }
+ ]
+}
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+# Base URI to fetch all logical networks information
+NETWORK_VLAN_BASE_URI = "NetworkConfigurationService/Networks"
+NETWORK_TYPE_BASE_URI = "NetworkConfigurationService/NetworkTypes"
+QOS_TYPE_BASE_URI = "NetworkConfigurationService/QosTypes"
+
+# Module Success Message
+MODULE_SUCCESS_MESSAGE = "Successfully retrieved the network VLAN information."
+
+# Module Failure Messages
+MODULE_FAILURE_MESSAGE = "Failed to retrieve the network VLAN information."
+NETWORK_VLAN_NAME_NOT_FOUND = "Provided network VLAN with name - '{0}' does not exist."
+
+SAFE_MAX_LIMIT = 9999
+
+
+def clean_data(data):
+    """
+    Strip OData envelope keys from a response dictionary.
+
+    data: A dictionary.
+    return: The same dictionary, after removing items that are not
+            required for the end user.
+    """
+    # Keys are removed in place; callers rely on this mutation as well as
+    # on the returned reference (e.g. when cleaning items inside a list).
+    for k in ['@odata.id', '@odata.type', '@odata.context', '@odata.count']:
+        data.pop(k, None)
+    return data
+
+
+def get_type_information(rest_obj, uri):
+    """
+    Fetch a type listing from OME and index it by Id.
+
+    rest_obj: Object containing information about connection to device.
+    uri: OME REST URI to query (network-type or QoS-type endpoint).
+    return: dict mapping each item's 'Id' to its cleaned item dict.
+            Empty dict when the response status is not 200.
+    """
+    type_info_dict = {}
+    resp = rest_obj.invoke_request('GET', uri)
+    if resp.status_code == 200:
+        # A collection response carries its items under 'value'; a single
+        # resource is wrapped in a one-element list so both shapes iterate.
+        type_info = resp.json_data.get('value') if isinstance(resp.json_data.get('value'), list) \
+            else [resp.json_data]
+        for item in type_info:
+            item = clean_data(item)
+            type_info_dict[item['Id']] = item
+    return type_info_dict
+
+
+def get_network_type_and_qos_type_information(rest_obj):
+    """
+    rest_obj: Object containing information about connection to device.
+    return: Dictionary of network types keyed by Id, with each entry's
+            integer 'QosType' replaced by the full QoS-type dict.
+    """
+    # Fetch network type and qos type information once
+    network_type_dict = get_type_information(rest_obj, NETWORK_TYPE_BASE_URI)
+    qos_type_dict = get_type_information(rest_obj, QOS_TYPE_BASE_URI)
+    # Update each network type with qos type info. An unknown QosType id
+    # raises KeyError, which the caller's except block reports to the user.
+    for key, item in network_type_dict.items():
+        item['QosType'] = qos_type_dict[item['QosType']]
+    return network_type_dict
+
+
+def main():
+    """Entry point: retrieve network VLAN information from OME and exit."""
+    specs = {
+        "id": {"required": False, "type": 'int'},
+        "name": {"required": False, "type": 'str'}
+    }
+    specs.update(ome_auth_params)
+    module = AnsibleModule(
+        argument_spec=specs,
+        mutually_exclusive=[["id", "name"]],
+        supports_check_mode=True)
+    try:
+        with RestOME(module.params, req_session=True) as rest_obj:
+            # Form URI to fetch network VLAN information: a single VLAN when
+            # 'id' is supplied, otherwise all VLANs capped at SAFE_MAX_LIMIT.
+            network_vlan_uri = "{0}({1})".format(NETWORK_VLAN_BASE_URI, module.params.get("id")) if module.params.get(
+                "id") else "{0}?$top={1}".format(NETWORK_VLAN_BASE_URI, SAFE_MAX_LIMIT)
+            resp = rest_obj.invoke_request('GET', network_vlan_uri)
+            if resp.status_code == 200:
+                network_vlan_info = resp.json_data.get('value') if isinstance(resp.json_data.get('value'), list) else [
+                    resp.json_data]
+                if module.params.get("name"):
+                    # Filter by exact name client-side; fail when no match.
+                    network_vlan_name = module.params.get("name")
+                    network_vlan = []
+                    for item in network_vlan_info:
+                        if item["Name"] == network_vlan_name.strip():
+                            network_vlan = [item]
+                            break
+                    if not network_vlan:
+                        module.fail_json(msg=NETWORK_VLAN_NAME_NOT_FOUND.format(network_vlan_name))
+                    network_vlan_info = network_vlan
+                # Get network type and Qos Type information
+                network_type_dict = get_network_type_and_qos_type_information(rest_obj)
+                # Update each network VLAN with network type and qos type information
+                for network_vlan in network_vlan_info:
+                    network_vlan = clean_data(network_vlan)
+                    network_vlan['Type'] = network_type_dict[network_vlan['Type']]
+                module.exit_json(msg=MODULE_SUCCESS_MESSAGE, network_vlan_info=network_vlan_info)
+            else:
+                module.fail_json(msg=MODULE_FAILURE_MESSAGE)
+    except HTTPError as err:
+        # A 404 (e.g. unknown VLAN id) has no parseable JSON error body.
+        if err.getcode() == 404:
+            module.fail_json(msg=str(err))
+        module.fail_json(msg=str(MODULE_FAILURE_MESSAGE), error_info=json.load(err))
+    except URLError as err:
+        module.exit_json(msg=str(err), unreachable=True)
+    except (IOError, ValueError, SSLError, TypeError, KeyError, ConnectionError, SSLValidationError, OSError) as err:
+        module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_powerstate.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_powerstate.py
new file mode 100644
index 000000000..7ead69f70
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_powerstate.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_powerstate
+short_description: Performs the power management operations on OpenManage Enterprise
+version_added: "2.1.0"
+description: This module performs the supported power management operations on OpenManage Enterprise.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ power_state:
+ description: Desired end power state.
+ type: str
+ required: True
+ choices: ['on', 'off', 'coldboot', 'warmboot', 'shutdown']
+ device_service_tag:
+ description:
+ - Targeted device service tag.
+ - I(device_service_tag) is mutually exclusive with I(device_id).
+ type: str
+ device_id:
+ description:
+ - Targeted device id.
+ - I(device_id) is mutually exclusive with I(device_service_tag).
+ type: int
+requirements:
+ - "python >= 3.8.6"
+author: "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Power state operation based on device id
+ dellemc.openmanage.ome_powerstate:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 11111
+ power_state: "off"
+
+- name: Power state operation based on device service tag
+ dellemc.openmanage.ome_powerstate:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: "KLBR111"
+ power_state: "on"
+
+- name: Power state operation based on list of device ids
+ dellemc.openmanage.ome_powerstate:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: "{{ item.device_id }}"
+ power_state: "{{ item.state }}"
+ with_items:
+ - { "device_id": 11111, "state": "on" }
+ - { "device_id": 22222, "state": "off" }
+
+- name: Power state operation based on list of device service tags
+ dellemc.openmanage.ome_powerstate:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: "{{ item.service_tag }}"
+ power_state: "{{ item.state }}"
+ with_items:
+ - { "service_tag": "KLBR111", "state": "on" }
+ - { "service_tag": "KLBR222", "state": "off" }
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: "Overall power state operation job status."
+ returned: always
+ sample: "Power State operation job submitted successfully."
+job_status:
+ type: dict
+ description: "Power state operation job and progress details from the OME."
+ returned: success
+ sample: {
+ "Builtin": false,
+ "CreatedBy": "user",
+ "Editable": true,
+ "EndTime": null,
+ "Id": 11111,
+ "JobDescription": "DeviceAction_Task",
+ "JobName": "DeviceAction_Task_PowerState",
+ "JobStatus": {
+ "Id": 1111,
+ "Name": "New"
+ },
+ "JobType": {
+ "Id": 1,
+ "Internal": false,
+ "Name": "DeviceAction_Task"
+ },
+ "LastRun": "2019-04-01 06:39:02.69",
+ "LastRunStatus": {
+ "Id": 1112,
+ "Name": "Running"
+ },
+ "NextRun": null,
+ "Params": [
+ {
+ "JobId": 11111,
+ "Key": "powerState",
+ "Value": "2"
+ },
+ {
+ "JobId": 11111,
+ "Key": "operationName",
+ "Value": "POWER_CONTROL"
+ }
+ ],
+ "Schedule": "",
+ "StartTime": null,
+ "State": "Enabled",
+ "Targets": [
+ {
+ "Data": "",
+ "Id": 11112,
+ "JobId": 11111,
+ "TargetType": {
+ "Id": 1000,
+ "Name": "DEVICE"
+ }
+ }
+ ],
+ "UpdatedBy": null,
+ "Visible": true
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+VALID_OPERATION = {"on": 2, "off": 12, "coldboot": 5, "warmboot": 10, "shutdown": 8}
+POWER_STATE_MAP = {"on": 17, "off": 18, "poweringon": 20, "poweringoff": 21}
+NOT_APPLICABLE_OPTIONS = ["coldboot", "warmboot", "shutdown"]
+
+
+def spawn_update_job(rest_obj, payload):
+    """Submit the power-state job to OME and return the creation response.
+
+    NOTE(review): despite the name, this does not poll or track the job to
+    completion; it only POSTs the payload and returns the created job's
+    details (empty dict when the response status is not 201).
+    """
+    job_uri, job_details = "JobService/Jobs", {}
+    job_resp = rest_obj.invoke_request("POST", job_uri, data=payload)
+    if job_resp.status_code == 201:
+        job_details = job_resp.json_data
+    return job_details
+
+
+def build_power_state_payload(device_id, device_type, valid_option):
+    """Build the payload for requested device.
+
+    device_id: target device id (int, or numeric string from the tag lookup).
+    device_type: OME device type id (validated upstream to 1000 or 2000).
+    valid_option: numeric power-control code from VALID_OPERATION.
+    return: dict payload suitable for POST JobService/Jobs.
+    """
+    payload = {
+        "Id": 0,
+        "JobName": "DeviceAction_Task_PowerState",
+        "JobDescription": "DeviceAction_Task",
+        "Schedule": "startnow",
+        "State": "Enabled",
+        "JobType": {"Id": 3, "Name": "DeviceAction_Task"},
+        "Params": [{"Key": "operationName", "Value": "POWER_CONTROL"},
+                   {"Key": "powerState", "Value": str(valid_option)}],
+        "Targets": [{"Id": int(device_id), "Data": "",
+                     "TargetType": {"Id": device_type, "Name": "DEVICE"}}],
+    }
+    return payload
+
+
+def get_device_state(module, resp, device_id):
+    """Get the current state and device type from response.
+
+    module: AnsibleModule, used to fail on invalid/unsupported devices.
+    resp: dict containing a 'report_list' of device records.
+    device_id: id of the target device (int or numeric string).
+    return: (current_state, device_type); current_state may be None when
+            the record has no 'PowerState' field.
+    """
+    current_state, device_type, invalid_device = None, None, True
+    for device in resp['report_list']:
+        if device['Id'] == int(device_id):
+            current_state = device.get('PowerState', None)
+            device_type = device['Type']
+            invalid_device = False
+            break
+    if invalid_device:
+        module.fail_json(msg="Unable to complete the operation because the entered target"
+                             " device id '{0}' is invalid.".format(device_id))
+    # Power control is only supported for server (1000) and chassis (2000)
+    # type ids — presumably; verify against the OME device-type catalogue.
+    if device_type not in (1000, 2000):
+        module.fail_json(msg="Unable to complete the operation because power"
+                             " state supports device type 1000 and 2000.")
+    return current_state, device_type
+
+
+def get_device_resource(module, rest_obj):
+    """Getting the device id filtered from the device inventory.
+
+    Resolves a service tag to a device id when needed, validates the target,
+    and returns the job payload. In check mode this exits the module with a
+    changed/unchanged verdict instead of returning.
+    """
+    power_state = module.params['power_state']
+    device_id = module.params['device_id']
+    service_tag = module.params['device_service_tag']
+    resp_data = rest_obj.get_all_report_details("DeviceService/Devices")
+    if resp_data['report_list'] and service_tag is not None:
+        # Map service tag -> device id (as string) for the whole inventory.
+        device_resp = dict([(device.get('DeviceServiceTag'), str(device.get('Id'))) for device in resp_data['report_list']])
+        if service_tag in device_resp:
+            device_id = device_resp[service_tag]
+        else:
+            module.fail_json(msg="Unable to complete the operation because the entered target"
+                                 " device service tag '{0}' is invalid.".format(service_tag))
+    current_state, device_type = get_device_state(module, resp_data, device_id)
+
+    # For check mode changes.
+    # valid_operation == True means the device is already in (or moving to)
+    # the requested state, so the request would be a no-op.
+    valid_option, valid_operation = VALID_OPERATION[power_state], False
+    if power_state in NOT_APPLICABLE_OPTIONS and current_state != POWER_STATE_MAP["on"]:
+        valid_operation = True
+    elif (valid_option == current_state) or \
+            (power_state == "on" and current_state in (POWER_STATE_MAP["on"], POWER_STATE_MAP['poweringon'])) or \
+            (power_state in ("off", "shutdown") and
+             current_state in (POWER_STATE_MAP["off"], POWER_STATE_MAP['poweringoff'])):
+        valid_operation = True
+
+    if module.check_mode and valid_operation:
+        module.exit_json(msg="No changes found to commit.")
+    elif module.check_mode and not valid_operation:
+        module.exit_json(msg="Changes found to commit.", changed=True)
+    payload = build_power_state_payload(device_id, device_type, valid_option)
+    return payload
+
+
+def main():
+    """Entry point: validate arguments and submit the power-state job."""
+    specs = {
+        "power_state": {"required": True, "type": "str",
+                        "choices": ["on", "off", "coldboot", "warmboot", "shutdown"]},
+        "device_service_tag": {"required": False, "type": "str"},
+        "device_id": {"required": False, "type": "int"},
+    }
+    specs.update(ome_auth_params)
+    module = AnsibleModule(
+        argument_spec=specs,
+        required_one_of=[["device_service_tag", "device_id"]],
+        mutually_exclusive=[["device_service_tag", "device_id"]],
+        supports_check_mode=True
+    )
+    try:
+        # Defensive re-check; required_one_of above already enforces that at
+        # least one of the two identifiers is supplied.
+        if module.params['device_id'] is None and module.params['device_service_tag'] is None:
+            module.fail_json(msg="device_id and device_service_tag attributes should not be None.")
+        job_status = {}
+        with RestOME(module.params, req_session=True) as rest_obj:
+            payload = get_device_resource(module, rest_obj)
+            job_status = spawn_update_job(rest_obj, payload)
+    except HTTPError as err:
+        module.fail_json(msg=str(err), job_status=json.load(err))
+    except (URLError, SSLValidationError, ConnectionError, TypeError, ValueError, SSLError, OSError) as err:
+        module.fail_json(msg=str(err))
+    module.exit_json(msg="Power State operation job submitted successfully.",
+                     job_status=job_status, changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py
new file mode 100644
index 000000000..d2f7a87c8
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py
@@ -0,0 +1,863 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.2.0
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ome_profile
+short_description: Create, modify, delete, assign, unassign and migrate a profile on OpenManage Enterprise
+version_added: "3.1.0"
+description: "This module allows to create, modify, delete, assign, unassign, and migrate a profile on OpenManage Enterprise."
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ command:
+ description:
+ - C(create) creates new profiles.
+ - "C(modify) modifies an existing profile. Only I(name), I(description), I(boot_to_network_iso), and I(attributes)
+ can be modified."
+ - C(delete) deletes an existing profile.
+ - C(assign) Deploys an existing profile on a target device and returns a task ID.
+ - C(unassign) unassigns a profile from a specified target and returns a task ID.
+ - C(migrate) migrates an existing profile and returns a task ID.
+ choices: [create, modify, delete, assign, unassign, migrate]
+ default: create
+ type: str
+ name_prefix:
+ description:
+ - The name provided when creating a profile is used as a prefix followed by the number assigned to it by OpenManage Enterprise.
+ - This is applicable only for a create operation.
+ - This option is mutually exclusive with I(name).
+ type: str
+ default: Profile
+ name:
+ description:
+ - Name of the profile.
+ - This is applicable for modify, delete, assign, unassign, and migrate operations.
+ - This option is mutually exclusive with I(name_prefix) and I(number_of_profiles).
+ type: str
+ new_name:
+ description:
+ - New name of the profile.
+ - Applicable when I(command) is C(modify).
+ type: str
+ number_of_profiles:
+ description:
+ - Provide the number of profiles to be created.
+ - This is applicable when I(name_prefix) is used with C(create).
+ - This option is mutually exclusive with I(name).
+ - Openmanage Enterprise can create a maximum of 100 profiles.
+ type: int
+ default: 1
+ template_name:
+ description:
+ - Name of the template for creating the profile(s).
+ - This is applicable when I(command) is C(create).
+ - This option is mutually exclusive with I(template_id).
+ type: str
+ template_id:
+ description:
+ - ID of the template.
+ - This is applicable when I(command) is C(create).
+ - This option is mutually exclusive with I(template_name).
+ type: int
+ device_id:
+ description:
+ - ID of the target device.
+ - This is applicable when I(command) is C(assign) and C(migrate).
+ - This option is mutually exclusive with I(device_service_tag).
+ type: int
+ device_service_tag:
+ description:
+ - Identifier of the target device.
+ - This is typically 7 to 8 characters in length.
+ - Applicable when I(command) is C(assign), and C(migrate).
+ - This option is mutually exclusive with I(device_id).
+ - If the device does not exist when I(command) is C(assign) then the profile is auto-deployed.
+ type: str
+ description:
+ description: Description of the profile.
+ type: str
+ boot_to_network_iso:
+ description:
+ - Details of the Share iso.
+ - Applicable when I(command) is C(create), C(assign), and C(modify).
+ type: dict
+ suboptions:
+ boot_to_network:
+ description: Enable or disable a network share.
+ type: bool
+ required: true
+ share_type:
+ description: Type of network share.
+ type: str
+ choices: [NFS, CIFS]
+ share_ip:
+ description: IP address of the network share.
+ type: str
+ share_user:
+ description: User name when I(share_type) is C(CIFS).
+ type: str
+ share_password:
+ description: User password when I(share_type) is C(CIFS).
+ type: str
+ workgroup:
+ description: User workgroup when I(share_type) is C(CIFS).
+ type: str
+ iso_path:
+ description: Specify the full ISO path including the share name.
+ type: str
+ iso_timeout:
+ description: Set the number of hours that the network ISO file will remain mapped to the target device(s).
+ type: int
+ choices: [1, 2, 4, 8, 16]
+ default: 4
+ filters:
+ description:
+ - Filters the profiles based on selected criteria.
+ - This is applicable when I(command) is C(delete) or C(unassign).
+ - This supports suboption I(ProfileIds) which takes a list of profile IDs.
+ - This also supports OData filter expressions with the suboption I(Filters).
+ - See OpenManage Enterprise REST API guide for the filtering options available.
+ - I(WARNING) When this option is used in case of C(unassign), task ID is not returned for any of the profiles affected.
+ type: dict
+ force:
+ description:
+ - Provides the option to force the migration of a profile even if the source device cannot be contacted.
+ - This option is applicable when I(command) is C(migrate).
+ type: bool
+ default: false
+ attributes:
+ description: Attributes for C(modify) and C(assign).
+ type: dict
+ suboptions:
+ Attributes:
+ description:
+ - List of attributes to be modified, when I(command) is C(modify).
+ - List of attributes to be overridden when I(command) is C(assign).
+ - "Use the I(Id) If the attribute Id is available. If not, use the comma separated I (DisplayName).
+ For more details about using the I(DisplayName), see the example provided."
+ type: list
+ elements: dict
+ Options:
+ description:
+ - Provides the different shut down options.
+ - This is applicable when I(command) is C(assign).
+ type: dict
+ Schedule:
+ description:
+ - Schedule for profile deployment.
+ - This is applicable when I(command) is C(assign).
+ type: dict
+requirements:
+ - "python >= 3.8.6"
+author: "Jagadeesh N V (@jagadeeshnv)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+ - C(assign) operation on an already assigned profile will not redeploy.
+'''
+
+EXAMPLES = r'''
+---
+- name: Create two profiles from a template
+ dellemc.openmanage.ome_profile:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_name: "template 1"
+ name_prefix: "omam_profile"
+ number_of_profiles: 2
+
+- name: Create profile with NFS share
+ dellemc.openmanage.ome_profile:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: create
+ template_name: "template 1"
+ name_prefix: "omam_profile"
+ number_of_profiles: 1
+ boot_to_network_iso:
+ boot_to_network: True
+ share_type: NFS
+ share_ip: "192.168.0.1"
+ iso_path: "path/to/my_iso.iso"
+ iso_timeout: 8
+
+- name: Create profile with CIFS share
+ dellemc.openmanage.ome_profile:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: create
+ template_name: "template 1"
+ name_prefix: "omam_profile"
+ number_of_profiles: 1
+ boot_to_network_iso:
+ boot_to_network: True
+ share_type: CIFS
+ share_ip: "192.168.0.2"
+ share_user: "username"
+ share_password: "password"
+ workgroup: "workgroup"
+ iso_path: "\\path\\to\\my_iso.iso"
+ iso_timeout: 8
+
+- name: Modify profile name with NFS share and attributes
+ dellemc.openmanage.ome_profile:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: modify
+ name: "Profile 00001"
+ new_name: "modified profile"
+ description: "new description"
+ boot_to_network_iso:
+ boot_to_network: True
+ share_type: NFS
+ share_ip: "192.168.0.3"
+ iso_path: "path/to/my_iso.iso"
+ iso_timeout: 8
+ attributes:
+ Attributes:
+ - Id: 4506
+ Value: "server attr 1"
+ IsIgnored: false
+ - Id: 4507
+ Value: "server attr 2"
+ IsIgnored: false
+ # Enter the comma separated string as appearing in the Detailed view on GUI
+ # System -> Server Topology -> ServerTopology 1 Aisle Name
+ - DisplayName: 'System, Server Topology, ServerTopology 1 Aisle Name'
+ Value: Aisle 5
+ IsIgnored: false
+
+- name: Delete a profile using profile name
+ dellemc.openmanage.ome_profile:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "delete"
+ name: "Profile 00001"
+
+- name: Delete profiles using filters
+ dellemc.openmanage.ome_profile:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "delete"
+ filters:
+ SelectAll: True
+ Filters: =contains(ProfileName,'Profile 00002')
+
+- name: Delete profiles using profile list filter
+ dellemc.openmanage.ome_profile:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "delete"
+ filters:
+ ProfileIds:
+ - 17123
+ - 16124
+
+- name: Assign a profile to target along with network share
+ dellemc.openmanage.ome_profile:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: assign
+ name: "Profile 00001"
+ device_id: 12456
+ boot_to_network_iso:
+ boot_to_network: True
+ share_type: NFS
+ share_ip: "192.168.0.1"
+ iso_path: "path/to/my_iso.iso"
+ iso_timeout: 8
+ attributes:
+ Attributes:
+ - Id: 4506
+ Value: "server attr 1"
+ IsIgnored: true
+ Options:
+ ShutdownType: 0
+ TimeToWaitBeforeShutdown: 300
+ EndHostPowerState: 1
+ StrictCheckingVlan: True
+ Schedule:
+ RunNow: True
+ RunLater: False
+
+- name: Unassign a profile using profile name
+ dellemc.openmanage.ome_profile:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "unassign"
+ name: "Profile 00003"
+
+- name: Unassign profiles using filters
+ dellemc.openmanage.ome_profile:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "unassign"
+ filters:
+ SelectAll: True
+ Filters: =contains(ProfileName,'Profile 00003')
+
+- name: Unassign profiles using profile list filter
+ dellemc.openmanage.ome_profile:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "unassign"
+ filters:
+ ProfileIds:
+ - 17123
+ - 16123
+
+- name: Migrate a profile
+ dellemc.openmanage.ome_profile:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "migrate"
+ name: "Profile 00001"
+ device_id: 12456
+'''
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of the profile operation.
+ returned: always
+ type: str
+ sample: "Successfully created 2 profile(s)."
+profile_ids:
+ description: IDs of the profiles created.
+ returned: when I(command) is C(create)
+ type: list
+ sample: [1234, 5678]
+job_id:
+ description:
+ - Task ID created when I(command) is C(assign), C(migrate) or C(unassign).
+ - C(assign) and C(unassign) operations do not trigger a task if a profile is auto-deployed.
+ returned: when I(command) is C(assign), C(migrate) or C(unassign)
+ type: int
+ sample: 14123
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+import time
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.common.dict_transformations import recursive_diff
+
+PROFILE_VIEW = "ProfileService/Profiles"
+TEMPLATE_VIEW = "TemplateService/Templates"
+DEVICE_VIEW = "DeviceService/Devices"
+JOB_URI = "JobService/Jobs({job_id})"
+PROFILE_ACTION = "ProfileService/Actions/ProfileService.{action}"
+PROFILE_ATTRIBUTES = "ProfileService/Profiles({profile_id})/AttributeDetails"
+PROFILE_NOT_FOUND = "Profile with the name '{name}' not found."
+CHANGES_MSG = "Changes found to be applied."
+NO_CHANGES_MSG = "No changes found to be applied."
+SEPRTR = ','
+
+
+def get_template_details(module, rest_obj):
+    """Look up a template by id or name and return its record.
+
+    Prefers I(template_id); falls back to I(template_name). Fails the module
+    when no exact match is found.
+
+    module: AnsibleModule carrying 'template_id' / 'template_name'.
+    rest_obj: OME session object.
+    return: the matching template dict.
+    """
+    # 'ident' avoids shadowing the builtin id().
+    ident = module.params.get('template_id')
+    query_param = {"$filter": "Id eq {0}".format(ident)}
+    srch = 'Id'
+    if not ident:
+        ident = module.params.get('template_name')
+        query_param = {"$filter": "Name eq '{0}'".format(ident)}
+        srch = 'Name'
+    resp = rest_obj.invoke_request('GET', TEMPLATE_VIEW, query_param=query_param)
+    if resp.success and resp.json_data.get('value'):
+        tlist = resp.json_data.get('value', [])
+        for xtype in tlist:
+            # The OData filter may match loosely; verify an exact match.
+            if xtype.get(srch) == ident:
+                return xtype
+    module.fail_json(msg="Template with {0} '{1}' not found.".format(srch, ident))
+
+
+def get_target_details(module, rest_obj):
+    """Look up a target device by id or service tag.
+
+    Prefers I(device_id); falls back to I(device_service_tag). Unlike
+    get_template_details, a miss does not fail the module: the caller gets
+    an error-message string instead (auto-deploy relies on this).
+
+    module: AnsibleModule carrying 'device_id' / 'device_service_tag'.
+    rest_obj: OME session object.
+    return: the matching device dict, or an error-message string on a miss.
+    """
+    # 'ident' avoids shadowing the builtin id().
+    ident = module.params.get('device_id')
+    query_param = {"$filter": "Id eq {0}".format(ident)}
+    srch = 'Id'
+    if not ident:
+        ident = module.params.get('device_service_tag')
+        query_param = {"$filter": "Identifier eq '{0}'".format(ident)}
+        srch = 'Identifier'
+    resp = rest_obj.invoke_request('GET', DEVICE_VIEW, query_param=query_param)
+    if resp.success and resp.json_data.get('value'):
+        tlist = resp.json_data.get('value', [])
+        for xtype in tlist:
+            # The OData filter may match loosely; verify an exact match.
+            if xtype.get(srch) == ident:
+                return xtype
+    return "Target with {0} '{1}' not found.".format(srch, ident)
+
+
+def get_profile(rest_obj, module):
+    """Get profile id based on requested profile name.
+
+    rest_obj: OME session object.
+    module: AnsibleModule; the 'name' parameter holds the profile name.
+    return: the matching profile dict, or None when no profile matches.
+    """
+    profile_name = module.params["name"]
+    profile = None
+    query_param = {"$filter": "ProfileName eq '{0}'".format(profile_name)}
+    profile_req = rest_obj.invoke_request("GET", PROFILE_VIEW, query_param=query_param)
+    # The OData filter may match loosely; keep only the exact name match.
+    for each in profile_req.json_data.get('value'):
+        if each['ProfileName'] == profile_name:
+            profile = each
+            break
+    return profile
+
+
def get_network_iso_payload(module):
    """Build the NetworkBootToIso payload from the 'boot_to_network_iso' option.

    Returns an empty dict when the option is not provided. Fails the module
    when the ISO path does not end with a '.iso' extension.
    """
    boot_opts = module.params.get("boot_to_network_iso")
    if not boot_opts:
        return {}
    iso_payload = {"BootToNetwork": bool(boot_opts.get("boot_to_network"))}
    iso_payload["ShareType"] = boot_opts.get("share_type")
    share_ip = boot_opts.get("share_ip")
    # ShareName mirrors the share IP address (the share_name input is not
    # consumed here).
    iso_payload["ShareDetail"] = {
        "IpAddress": share_ip,
        "ShareName": share_ip,
        "User": boot_opts.get("share_user"),
        "Password": boot_opts.get("share_password"),
        "WorkGroup": boot_opts.get("workgroup"),
    }
    if not str(boot_opts.get("iso_path")).lower().endswith('.iso'):
        module.fail_json(msg="ISO path does not have extension '.iso'")
    iso_payload["IsoPath"] = boot_opts.get("iso_path")
    iso_payload["IsoTimeout"] = boot_opts.get("iso_timeout")
    return iso_payload
+
+
def recurse_subattr_list(subgroup, prefix, attr_detailed, attr_map, adv_list):
    """Walk nested SubAttributeGroups, recording requested attributes.

    Mutates attr_detailed (display-path -> AttributeId) and attr_map
    (AttributeId -> attribute dict) in place for every attribute whose
    comma-joined display path appears in adv_list (case sensitive).
    """
    if not isinstance(subgroup, list):
        return
    for group in subgroup:
        path = "%s%s%s" % (prefix, SEPRTR, group.get("DisplayName"))
        children = group.get("SubAttributeGroups")
        if children:
            recurse_subattr_list(children, path, attr_detailed, attr_map, adv_list)
            continue
        for attribute in group.get('Attributes'):
            attribute['prefix'] = path
            full_name = "%s%s%s" % (path, SEPRTR, attribute['DisplayName'])
            if full_name in adv_list:
                attr_detailed[full_name] = attribute['AttributeId']
                attr_map[attribute['AttributeId']] = attribute
+
+
def get_subattr_all(attr_dtls, adv_list):
    """Flatten all attribute groups into (path -> id, id -> attribute) maps."""
    attr_detailed, attr_map = {}, {}
    for group in attr_dtls:
        recurse_subattr_list(group.get('SubAttributeGroups'), group.get('DisplayName'),
                             attr_detailed, attr_map, adv_list)
    return attr_detailed, attr_map
+
+
def _normalize_display_name(display_name):
    """Join the comma-separated display-name parts with whitespace trimmed."""
    return SEPRTR.join(part.strip() for part in str(display_name).split(SEPRTR))


def attributes_check(module, rest_obj, inp_attr, profile_id):
    """Compare requested attributes against the profile's current values.

    Rewrites each payload attribute's DisplayName into its AttributeId and
    drops attributes unknown to the profile. Returns the number of
    attributes whose Value/IsIgnored differ (0 means no change needed).

    Fixes: renamed the local ``id`` (shadowed the builtin), extracted the
    duplicated display-name normalization into a helper, removed a
    leftover commented-out debug line.
    """
    diff = 0
    try:
        resp = rest_obj.invoke_request("GET", PROFILE_ATTRIBUTES.format(profile_id=profile_id))
        attr_dtls = resp.json_data
        payload_attr = inp_attr.get("Attributes", [])
        # Normalized display-name paths requested by the user (case sensitive).
        adv_list = [_normalize_display_name(attr["DisplayName"])
                    for attr in payload_attr if attr.get("DisplayName")]
        attr_detailed, attr_map = get_subattr_all(attr_dtls.get('AttributeGroups'), adv_list)
        rem_attrs = []
        for attr in payload_attr:
            if attr.get("DisplayName"):
                attr_id = attr_detailed.get(_normalize_display_name(attr["DisplayName"]), "")
                attr['Id'] = attr_id
                attr.pop("DisplayName", None)
            else:
                attr_id = attr.get('Id')
            if attr_id:
                existing = attr_map.get(attr_id, {})
                if not existing:
                    # Unknown to the profile; drop it from the payload.
                    rem_attrs.append(attr)
                    continue
                if attr.get('Value') != existing.get("Value") or attr.get('IsIgnored') != existing.get("IsIgnored"):
                    diff = diff + 1
        for rem in rem_attrs:
            payload_attr.remove(rem)
    except Exception:
        # Best effort: when the comparison cannot be made, assume a change
        # so the operation is attempted rather than silently skipped.
        diff = 1
    return diff
+
+
def assign_profile(module, rest_obj):
    """Assign a profile to a target device, or stage it for auto-deployment.

    When the target device exists in the inventory, a regular AssignProfile
    is issued; when only a service tag is given for an unknown device, the
    profile is staged via AssignProfileForAutoDeploy.
    """
    mparam = module.params
    payload = {}
    if mparam.get('name'):
        prof = get_profile(rest_obj, module)
        if prof:
            payload['Id'] = prof['Id']
        else:
            module.fail_json(msg=PROFILE_NOT_FOUND.format(name=mparam.get('name')))
    target = get_target_details(module, rest_obj)
    if isinstance(target, dict):
        # Target found in inventory: normal assignment path.
        payload['TargetId'] = target['Id']
        if prof['ProfileState'] == 4:  # 4 == deployed (see migrate_profile)
            if prof['TargetId'] == target['Id']:
                module.exit_json(msg="The profile is assigned to the target {0}.".format(target['Id']))
            else:
                module.fail_json(msg="The profile is assigned to a different target. Use the migrate command or "
                                     "unassign the profile and then proceed with assigning the profile to the target.")
        action = "AssignProfile"
        msg = "Successfully applied the assign operation."
        try:
            resp = rest_obj.invoke_request('POST', PROFILE_ACTION.format(action='GetInvalidTargetsForAssignProfile'),
                                           data={'Id': prof['Id']})
            if target['Id'] in list(resp.json_data):
                module.fail_json(msg="The target device is invalid for the given profile.")
        except HTTPError:
            # Target validation is best effort; proceed if the check fails.
            resp = None
        ad_opts_list = ['Attributes', 'Options', 'Schedule']
    else:
        # get_target_details returned an error string: the device is not in
        # the inventory. Only auto-deploy by service tag is possible here.
        if mparam.get('device_id'):
            module.fail_json(msg=target)
        action = "AssignProfileForAutoDeploy"
        msg = "Successfully applied the assign operation for auto-deployment."
        payload['Identifier'] = mparam.get('device_service_tag')
        # ProfileState 1 — presumably already staged; TODO confirm the
        # state-code semantics against the OME API documentation.
        if prof['ProfileState'] == 1:
            if prof['TargetName'] == payload['Identifier']:
                module.exit_json(msg="The profile is assigned to the target {0}.".format(payload['Identifier']))
            else:
                module.fail_json(msg="The profile is assigned to a different target. "
                                     "Unassign the profile and then proceed with assigning the profile to the target.")
        ad_opts_list = ['Attributes']
    boot_iso_dict = get_network_iso_payload(module)
    if boot_iso_dict:
        payload["NetworkBootToIso"] = boot_iso_dict
    ad_opts = mparam.get("attributes")
    for opt in ad_opts_list:
        if ad_opts and ad_opts.get(opt):
            # NOTE(review): the diff computed here is never read — check mode
            # below reports changes unconditionally. Confirm this is intended;
            # attributes_check is still needed for its payload rewriting.
            diff = attributes_check(module, rest_obj, ad_opts, prof['Id'])
            payload[opt] = ad_opts.get(opt)
    if module.check_mode:
        module.exit_json(msg=CHANGES_MSG, changed=True)
    resp = rest_obj.invoke_request('POST', PROFILE_ACTION.format(action=action), data=payload)
    res_dict = {'msg': msg, 'changed': True}
    if action == 'AssignProfile':
        try:
            res_prof = get_profile(rest_obj, module)
            time.sleep(5)
            if res_prof.get('DeploymentTaskId'):
                res_dict['job_id'] = res_prof.get('DeploymentTaskId')
                res_dict['msg'] = "Successfully triggered the job for the assign operation."
        except HTTPError:
            res_dict['msg'] = "Successfully applied the assign operation. Failed to fetch job details."
    module.exit_json(**res_dict)
+
+
def unassign_profile(module, rest_obj):
    """Unassign a profile by name, or a set of profiles via a filter payload.

    Exits with changed=True and, when the unassign spawns a deployment job,
    the job id.

    Fixes: the "Unable to fetch job details" message set in the HTTPError
    handler was dead code (unconditionally overwritten later); the
    filters-only path crashed with an unhandled AttributeError because
    get_profile returns None when no 'name' is given.
    """
    mparam = module.params
    prof = {}
    # Default success message; overridden below when job lookups fail or a
    # job is triggered.
    msg = "Successfully applied the unassign operation. No job was triggered."
    if mparam.get('name'):
        payload = {}
        prof = get_profile(rest_obj, module)
        if not prof:
            module.fail_json(msg=PROFILE_NOT_FOUND.format(name=mparam.get('name')))
        if prof['ProfileState'] == 0:
            module.exit_json(msg="Profile is in an unassigned state.")
        if prof['DeploymentTaskId']:
            try:
                resp = rest_obj.invoke_request('GET', JOB_URI.format(job_id=prof['DeploymentTaskId']))
                job_dict = resp.json_data
                job_status = job_dict.get('LastRunStatus')
                if job_status.get('Name') == 'Running':
                    module.fail_json(msg="Profile deployment task is in progress. Wait for the job to finish.")
            except HTTPError:
                # Proceed anyway, but report that job details were unreadable.
                msg = "Unable to fetch job details. Applied the unassign operation"
        payload['ProfileIds'] = [prof['Id']]
    if mparam.get('filters'):
        payload = mparam.get('filters')
    if module.check_mode:
        module.exit_json(msg=CHANGES_MSG, changed=True)
    resp = rest_obj.invoke_request('POST', PROFILE_ACTION.format(action='UnassignProfiles'), data=payload)
    res_dict = {'msg': msg, 'changed': True}
    try:
        res_prof = get_profile(rest_obj, module)
        time.sleep(3)
        # res_prof is None when unassigning via filters (no 'name' given).
        if res_prof and res_prof.get('DeploymentTaskId'):
            res_dict['job_id'] = res_prof.get('DeploymentTaskId')
            res_dict['msg'] = "Successfully triggered a job for the unassign operation."
    except HTTPError:
        res_dict['msg'] = "Successfully triggered a job for the unassign operation. Failed to fetch the job details."
    module.exit_json(**res_dict)
+
+
def create_profile(module, rest_obj):
    """Create one or more profiles from a template and report their ids."""
    mparam = module.params
    template = get_template_details(module, rest_obj)
    payload = {
        "TemplateId": template["Id"],
        "NamePrefix": mparam.get("name_prefix"),
        "NumberOfProfilesToCreate": mparam["number_of_profiles"],
    }
    if mparam.get("description"):
        payload["Description"] = mparam["description"]
    iso_payload = get_network_iso_payload(module)
    if iso_payload:
        payload["NetworkBootToIso"] = iso_payload
    if module.check_mode:
        module.exit_json(msg=CHANGES_MSG, changed=True)
    resp = rest_obj.invoke_request('POST', PROFILE_VIEW, data=payload)
    profile_id_list = resp.json_data
    module.exit_json(msg="Successfully created {0} profile(s).".format(len(profile_id_list)),
                     changed=True, profile_ids=profile_id_list)
+
+
def modify_profile(module, rest_obj):
    """Modify name/description/boot-ISO/attributes of an existing profile.

    Performs an idempotency check and exits with NO_CHANGES_MSG when
    nothing differs.

    Fix: removed the redundant nested ``if ad_opts.get("Attributes")``
    check that duplicated the enclosing condition.
    """
    mparam = module.params
    payload = {}
    prof = get_profile(rest_obj, module)
    if not prof:
        module.fail_json(msg=PROFILE_NOT_FOUND.format(name=mparam.get('name')))
    diff = 0
    new_name = mparam.get('new_name')
    payload['Name'] = new_name if new_name else prof['ProfileName']
    if new_name and new_name != prof['ProfileName']:
        diff += 1
    desc = mparam.get('description')
    if desc and desc != prof['ProfileDescription']:
        payload['Description'] = desc
        diff += 1
    boot_iso_dict = get_network_iso_payload(module)
    rdict = prof.get('NetworkBootToIso') if prof.get('NetworkBootToIso') else {}
    if boot_iso_dict:
        nest_diff = recursive_diff(boot_iso_dict, rdict)
        if nest_diff:
            # recursive_diff returns (only_in_left, only_in_right); a
            # non-empty first element means the requested settings differ.
            if nest_diff[0]:
                diff += 1
            payload["NetworkBootToIso"] = boot_iso_dict
    ad_opts = mparam.get("attributes")
    if ad_opts and ad_opts.get("Attributes"):
        diff = diff + attributes_check(module, rest_obj, ad_opts, prof['Id'])
        payload["Attributes"] = ad_opts.get("Attributes")
    payload['Id'] = prof['Id']
    if diff:
        if module.check_mode:
            module.exit_json(msg=CHANGES_MSG, changed=True)
        resp = rest_obj.invoke_request('PUT', PROFILE_VIEW + "({0})".format(payload['Id']), data=payload)
        module.exit_json(msg="Successfully modified the profile.", changed=True)
    module.exit_json(msg=NO_CHANGES_MSG)
+
+
def delete_profile(module, rest_obj):
    """Delete a profile by name, or a set of profiles via a filter payload."""
    mparam = module.params
    if mparam.get('name'):
        prof = get_profile(rest_obj, module)
        if not prof:
            module.exit_json(msg=PROFILE_NOT_FOUND.format(name=mparam.get('name')))
        # Only profiles in the unassigned state (0) may be deleted.
        if prof['ProfileState'] > 0:
            module.fail_json(msg="Profile has to be in an unassigned state for it to be deleted.")
        if module.check_mode:
            module.exit_json(msg=CHANGES_MSG, changed=True)
        rest_obj.invoke_request('DELETE', PROFILE_VIEW + "({0})".format(prof['Id']))
        module.exit_json(msg="Successfully deleted the profile.", changed=True)
    if mparam.get('filters'):
        payload = mparam.get('filters')
        if module.check_mode:
            module.exit_json(msg=CHANGES_MSG, changed=True)
        rest_obj.invoke_request('POST', PROFILE_ACTION.format(action='Delete'), data=payload)
        module.exit_json(msg="Successfully completed the delete operation.", changed=True)
+
+
def migrate_profile(module, rest_obj):
    """Migrate a deployed profile to a different target device.

    Exits unchanged when the profile is already on the requested target;
    fails when the profile is not deployed or the target is invalid.
    """
    mparam = module.params
    payload = {}
    payload['ForceMigrate'] = mparam.get('force')
    target = get_target_details(module, rest_obj)
    if not isinstance(target, dict):
        # get_target_details returns an error string when lookup fails.
        module.fail_json(msg=target)
    payload['TargetId'] = target['Id']
    prof = get_profile(rest_obj, module)
    if prof:
        if target['Id'] == prof['TargetId']:
            module.exit_json(msg=NO_CHANGES_MSG)
        try:
            resp = rest_obj.invoke_request('POST', PROFILE_ACTION.format(action='GetInvalidTargetsForAssignProfile'),
                                           data={'Id': prof['Id']})
            if target['Id'] in list(resp.json_data):
                module.fail_json(msg="The target device is invalid for the given profile.")
        except HTTPError:
            # Target validation is best effort; proceed if the check fails.
            resp = None
        if prof['ProfileState'] == 4:  # migrate applicable in deployed state only
            payload['ProfileId'] = prof['Id']
            if module.check_mode:
                module.exit_json(msg=CHANGES_MSG, changed=True)
            resp = rest_obj.invoke_request('POST', PROFILE_ACTION.format(action='MigrateProfile'), data=payload)
            msg = "Successfully applied the migrate operation."
            res_dict = {'msg': msg, 'changed': True}
            try:
                time.sleep(5)
                res_prof = get_profile(rest_obj, module)
                if res_prof.get('DeploymentTaskId'):
                    res_dict['job_id'] = res_prof.get('DeploymentTaskId')
                    res_dict['msg'] = "Successfully triggered the job for the migrate operation."
            except HTTPError:
                res_dict['msg'] = "Successfully applied the migrate operation. Failed to fetch job details."
            module.exit_json(**res_dict)
        else:
            module.fail_json(msg="Profile needs to be in a deployed state for a migrate operation.")
    else:
        module.fail_json(msg=PROFILE_NOT_FOUND.format(name=mparam.get('name')))
+
+
def profile_operation(module, rest_obj):
    """Dispatch to the handler for the requested command."""
    handlers = {
        "create": create_profile,
        "modify": modify_profile,
        "delete": delete_profile,
        "assign": assign_profile,
        "unassign": unassign_profile,
        "migrate": migrate_profile,
    }
    handler = handlers.get(module.params.get("command"))
    if handler:
        handler(module, rest_obj)
+
+
def main():
    """Module entry point: build the argument spec and run the operation."""
    # Sub-spec for the 'boot_to_network_iso' option.
    network_iso_spec = {"boot_to_network": {"required": True, "type": 'bool'},
                        "share_type": {"choices": ['NFS', 'CIFS']},
                        "share_ip": {"type": 'str'},
                        "share_user": {"type": 'str'},
                        "share_password": {"type": 'str', "no_log": True},
                        "workgroup": {"type": 'str'},
                        "iso_path": {"type": 'str'},
                        "iso_timeout": {"type": 'int', "default": 4,
                                        "choices": [1, 2, 4, 8, 16]}}
    # Sub-spec for the 'attributes' option used by assign/modify.
    assign_spec = {"Attributes": {"type": 'list', "elements": 'dict'},
                   "Options": {"type": 'dict'},
                   "Schedule": {"type": 'dict'}}
    specs = {
        "command": {"default": "create",
                    "choices": ['create', 'modify', 'delete', 'assign', 'unassign', 'migrate']},
        "name_prefix": {"default": "Profile", "type": 'str'},
        "name": {"type": 'str'},
        "new_name": {"type": 'str'},
        "number_of_profiles": {"default": 1, "type": 'int'},
        "template_name": {"type": 'str'},
        "template_id": {"type": "int"},
        "device_id": {"type": 'int'},
        "device_service_tag": {"type": 'str'},
        "description": {"type": 'str'},
        # required_if inside the sub-spec applies to the nested options.
        "boot_to_network_iso": {"type": 'dict', "options": network_iso_spec,
                                "required_if": [
                                    ['boot_to_network', True, ['share_type', 'share_ip', 'iso_path']],
                                    ['share_type', 'CIFS', ['share_user', 'share_password']]
                                ]},
        "filters": {"type": 'dict'},
        "attributes": {"type": 'dict', "options": assign_spec},
        "force": {"default": False, "type": 'bool'}
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[
            ['command', 'create', ['template_name', 'template_id'], True],
            ['command', 'modify', ['name']],
            ['command', 'modify', ['new_name', 'description', 'attributes', 'boot_to_network_iso'], True],
            ['command', 'assign', ['name']],
            ['command', 'assign', ['device_id', 'device_service_tag'], True],
            ['command', 'unassign', ['name', "filters"], True],
            ['command', 'delete', ['name', "filters"], True],
            ['command', 'migrate', ['name']],
            ['command', 'migrate', ['device_id', 'device_service_tag'], True],
        ],
        mutually_exclusive=[
            ['name', 'name_prefix'],
            ['name', 'number_of_profiles'],
            ['name', 'filters'],
            ['device_id', 'device_service_tag'],
            ['template_name', 'template_id']],
        supports_check_mode=True)
    try:
        # req_session=True keeps one authenticated session for all calls.
        with RestOME(module.params, req_session=True) as rest_obj:
            profile_operation(module, rest_obj)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Unreachable host is reported as unreachable, not failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py
new file mode 100644
index 000000000..81e3cb2ca
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py
@@ -0,0 +1,262 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.1.0
+# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_server_interface_profile_info
+short_description: Retrieves the information of server interface profile on OpenManage Enterprise Modular.
+description: This module retrieves the information of server interface profiles
+ on OpenManage Enterprise Modular.
+version_added: "5.1.0"
+extends_documentation_fragment:
+ - dellemc.openmanage.omem_auth_options
+options:
+ device_id:
+ type: list
+ description:
+ - The ID of the device.
+ - I(device_id) is mutually exclusive with I(device_service_tag).
+ elements: int
+ device_service_tag:
+ type: list
+ description:
+ - The service tag of the device.
+ - I(device_service_tag) is mutually exclusive with I(device_id).
+ elements: str
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Retrieve the server interface profiles of all the devices using device ID.
+ dellemc.openmanage.ome_server_interface_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id:
+ - 10001
+ - 10002
+
+- name: Retrieve the server interface profiles of all the devices using device service tag.
+ dellemc.openmanage.ome_server_interface_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag:
+ - 6GHH6H2
+ - 6KHH6H3
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall status of the server interface profile information.
+ returned: on success
+ sample: "Successfully retrieved the server interface profile information."
+server_profiles:
+ type: list
+ description: Returns the information of collected server interface profile information.
+ returned: success
+ sample: [
+ {
+ "BondingTechnology": "LACP",
+ "Id": "6KZK6K2",
+ "ServerInterfaceProfile": [
+ {
+ "FabricId": "1ea6bf64-3cf0-4e06-a136-5046d874d1e7",
+ "Id": "NIC.Mezzanine.1A-1-1",
+ "NativeVLAN": 0,
+ "Networks": [
+ {
+ "CreatedBy": "system",
+ "CreationTime": "2018-11-27 10:22:14.140",
+ "Description": "VLAN 1",
+ "Id": 10001,
+ "InternalRefNWUUId": "add035b9-a971-400d-a3fa-bb365df1d476",
+ "Name": "VLAN 1",
+ "Type": 2,
+ "UpdatedBy": null,
+ "UpdatedTime": "2018-11-27 10:22:14.140",
+ "VlanMaximum": 1,
+ "VlanMinimum": 1
+ }
+ ],
+ "NicBonded": true,
+ "OnboardedPort": "59HW8X2:ethernet1/1/1"
+ },
+ {
+ "FabricId": "3ea6be04-5cf0-4e05-a136-5046d874d1e6",
+ "Id": "NIC.Mezzanine.1A-2-1",
+ "NativeVLAN": 0,
+ "Networks": [
+ {
+ "CreatedBy": "system",
+ "CreationTime": "2018-09-25 14:46:12.374",
+ "Description": null,
+ "Id": 10155,
+ "InternalRefNWUUId": "f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d",
+ "Name": "jagvlan",
+ "Type": 1,
+ "UpdatedBy": null,
+ "UpdatedTime": "2018-09-25 14:46:12.374",
+ "VlanMaximum": 143,
+ "VlanMinimum": 143
+ }
+ ],
+ "NicBonded": false,
+ "OnboardedPort": "6H7J6Z2:ethernet1/1/1"
+ }
+ ]
+ }
+ ]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+"""
+
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+
# OME-Modular REST endpoints used by this module.
DOMAIN_URI = "ManagementDomainService/Domains"
PROFILE_URI = "NetworkService/ServerProfiles"
DEVICE_URI = "DeviceService/Devices"
NETWORK_PROFILE_URI = "NetworkService/ServerProfiles('{0}')/ServerInterfaceProfiles"

# User-facing messages.
DOMAIN_FAIL_MSG = "The information retrieval operation of server interface profile is supported only on " \
                  "OpenManage Enterprise Modular."
CONFIG_FAIL_MSG = "one of the following is required: device_id, device_service_tag."
INVALID_DEVICE = "Unable to complete the operation because the entered " \
                 "target device {0}(s) '{1}' are invalid."
PROFILE_ERR_MSG = "Unable to complete the operation because the server " \
                  "profile(s) for {0} do not exist in the Fabric Manager."
SUCCESS_MSG = "Successfully retrieved the server interface profile information."
+
+
def check_domain_service(module, rest_obj):
    """Fail fast when the target is not an OpenManage Enterprise Modular.

    The Domains endpoint is probed; error code CGEN1006 in the response is
    treated as "not an OME-Modular appliance".
    """
    try:
        rest_obj.invoke_request("GET", DOMAIN_URI, api_timeout=5)
    except HTTPError as err:
        message_id = json.load(err)["error"]["@Message.ExtendedInfo"][0]["MessageId"]
        if message_id == "CGEN1006":
            module.fail_json(msg=DOMAIN_FAIL_MSG)
    return
+
+
def get_sip_info(module, rest_obj):
    """Collect server interface profile details for the requested devices.

    Validates the requested device ids/service tags against the device
    inventory, then fetches each device's server profile and its interface
    profiles. Fails when any id/tag is unknown or has no fabric profile.
    """
    invalid, valid_service_tag, device_map = [], [], {}
    device_id, tag = module.params.get("device_id"), module.params.get("device_service_tag")
    key, value = ("Id", device_id) if device_id is not None else ("DeviceServiceTag", tag)
    resp_data = rest_obj.get_all_report_details(DEVICE_URI)
    if resp_data['report_list']:
        for each in value:
            each_device = list(filter(lambda d: d[key] in [each], resp_data["report_list"]))
            if each_device and key == "DeviceServiceTag":
                valid_service_tag.append(each)
            elif each_device and key == "Id":
                # Translate the id to its service tag; remember the mapping so
                # errors can be reported using the id the user supplied.
                valid_service_tag.append(each_device[0]["DeviceServiceTag"])
                device_map[each_device[0]["DeviceServiceTag"]] = each
            if not each_device:
                invalid.append(each)
    if invalid:
        err_value = "id" if key == "Id" else "service tag"
        module.fail_json(msg=INVALID_DEVICE.format(err_value, ",".join(map(str, set(invalid)))))

    invalid_fabric_tag, sip_info = [], []
    for pro_id in valid_service_tag:
        profile_dict = {}
        try:
            profile_resp = rest_obj.invoke_request("GET", "{0}('{1}')".format(PROFILE_URI, pro_id))
        except HTTPError as err:
            err_message = json.load(err)
            # CDEV5008: no server profile exists for this device in the fabric.
            if err_message.get('error', {}).get('@Message.ExtendedInfo')[0]["MessageId"] == "CDEV5008":
                if key == "Id":
                    invalid_fabric_tag.append(device_map[pro_id])
                else:
                    invalid_fabric_tag.append(pro_id)
        else:
            profile_data = rest_obj.strip_substr_dict(profile_resp.json_data)
            profile_dict.update(profile_data)
            np_resp = rest_obj.invoke_request("GET", NETWORK_PROFILE_URI.format(pro_id))
            sip_strip = []
            # NOTE(review): the inner comprehension reuses the name 'each',
            # shadowing the outer loop variable — works, but confirm intent.
            for each in np_resp.json_data["value"]:
                np_strip_data = rest_obj.strip_substr_dict(each)
                np_strip_data["Networks"] = [rest_obj.strip_substr_dict(each) for each in np_strip_data["Networks"]]
                sip_strip.append(np_strip_data)
            profile_dict["ServerInterfaceProfile"] = sip_strip
            sip_info.append(profile_dict)

    if invalid_fabric_tag:
        module.fail_json(msg=PROFILE_ERR_MSG.format(", ".join(set(map(str, invalid_fabric_tag)))))
    return sip_info
+
+
def main():
    """Module entry point: validate inputs and fetch SIP information."""
    argument_spec = {
        "device_id": {"required": False, "type": "list", "elements": "int"},
        "device_service_tag": {"required": False, "type": "list", "elements": "str"},
    }
    argument_spec.update(ome_auth_params)
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[('device_id', 'device_service_tag')],
                           required_one_of=[["device_id", "device_service_tag"]],
                           supports_check_mode=True, )
    # NOTE(review): this explicit guard appears to cover empty lists, which
    # presumably satisfy required_one_of — confirm against Ansible behavior.
    if not any([module.params.get("device_id"), module.params.get("device_service_tag")]):
        module.fail_json(msg=CONFIG_FAIL_MSG)
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            check_domain_service(module, rest_obj)
            sip_info = get_sip_info(module, rest_obj)
            module.exit_json(msg=SUCCESS_MSG, server_profiles=sip_info)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Unreachable host is reported as unreachable, not failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError,
            AttributeError, IndexError, KeyError, OSError) as err:
        module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profiles.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profiles.py
new file mode 100644
index 000000000..d30e7f382
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profiles.py
@@ -0,0 +1,425 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.1.0
+# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_server_interface_profiles
+short_description: Configure server interface profiles
+version_added: "5.1.0"
+description: This module configures server interface profiles on OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.omem_auth_options
+options:
+ device_id:
+ description:
+ - Device id of the Server under chassis fabric.
+ - I(device_id) and I(device_service_tag) is mutually exclusive.
+ type: list
+ elements: int
+ device_service_tag:
+ description:
+ - Service tag of the Server under chassis fabric.
+ - I(device_service_tag) and I(device_id) is mutually exclusive.
+ type: list
+ elements: str
+ nic_teaming:
+ description:
+ - NIC teaming options.
+ - C(NoTeaming) the NICs are not bonded and provide no load balancing or redundancy.
+ - C(LACP) use LACP for NIC teaming.
+ - C(Other) use other technology for NIC teaming.
+ choices: ['LACP', 'NoTeaming', 'Other']
+ type: str
+ nic_configuration:
+ description: NIC configuration for the Servers to be applied.
+ type: list
+ elements: dict
+ suboptions:
+ nic_identifier:
+ description:
+ - ID of the NIC or port number.
+ - C(Note) This will not be validated.
+ type: str
+ required: True
+ team:
+ description:
+ - Group two or more ports. The ports must be connected to the same pair of Ethernet switches.
+ - I(team) is applicable only if I(nic_teaming) is C(LACP).
+ type: bool
+ untagged_network:
+ description:
+ - The maximum or minimum VLAN id of the network to be untagged.
+ - The I(untagged_network) can be retrieved using the M(dellemc.openmanage.ome_network_vlan_info)
+ - If I(untagged_network) needs to be unset this needs to be sent as C(0)
+ - C(Note) The network cannot be added as a untagged network if it is already assigned to a tagged network.
+ type: int
+ tagged_networks:
+ description:
+ - List of tagged networks
+ - Network cannot be added as a tagged network if it is already assigned to untagged network
+ type: dict
+ suboptions:
+ state:
+ description:
+ - Indicates if a list of networks needs to be added or deleted.
+ - C(present) to add the network to the tagged list
+ - C(absent) to delete the Network from the tagged list
+ choices: [present, absent]
+ type: str
+ default: present
+ names:
+ description:
+ - List of network name to be marked as tagged networks
+ - The I(names) can be retrieved using the M(dellemc.openmanage.ome_network_vlan_info)
+ type: list
+ elements: str
+ required: True
+ job_wait:
+ description:
+ - Provides the option to wait for job completion.
+ type: bool
+ default: true
+ job_wait_timeout:
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(True).
+ type: int
+ default: 120
+requirements:
+ - "python >= 3.8.6"
+author: "Jagadeesh N V (@jagadeeshnv)"
+notes:
+ - This module supports C(check_mode).
+ - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+'''
+
+EXAMPLES = r'''
+---
+- name: Modify Server Interface Profile for the server using the service tag
+ dellemc.openmanage.ome_server_interface_profiles:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag:
+ - SVCTAG1
+ - SVCTAG2
+ nic_teaming: LACP
+ nic_configuration:
+ - nic_identifier: NIC.Mezzanine.1A-1-1
+ team: no
+ untagged_network: 2
+ tagged_networks:
+ names:
+ - vlan1
+ - nic_identifier: NIC.Mezzanine.1A-2-1
+ team: yes
+ untagged_network: 3
+ tagged_networks:
+ names:
+ - range120-125
+
+- name: Modify Server Interface Profile for the server using the device id
+ dellemc.openmanage.ome_server_interface_profiles:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id:
+ - 34523
+ - 48999
+ nic_teaming: NoTeaming
+ nic_configuration:
+ - nic_identifier: NIC.Mezzanine.1A-1-1
+ team: no
+ untagged_network: 2
+ tagged_networks:
+ names:
+ - vlan2
+ - nic_identifier: NIC.Mezzanine.1A-2-1
+ team: yes
+ untagged_network: 3
+ tagged_networks:
+ names:
+ - range120-125
+'''
+
+RETURN = r'''
+---
+msg:
+ description: Status of the overall server interface operation.
+ returned: always
+ type: str
+ sample: Successfully triggered apply server profiles job.
+job_id:
+ description: Job ID of the task to apply the server interface profiles.
+ returned: on applying the Interface profiles
+ type: int
+ sample: 14123
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import \
+ get_rest_items, strip_substr_dict, job_tracking, apply_diff_key
+
# OME-Modular REST endpoints used by this module.
SERVER_PROFILE = "NetworkService/ServerProfiles('{service_tag}')"
SERVER_INTERFACE = "NetworkService/ServerProfiles('{service_tag}')/ServerInterfaceProfiles"
VLANS = "NetworkConfigurationService/Networks"
DEVICE_URI = "DeviceService/Devices"
APPLY_SERVER_PROFILES = "NetworkService/Actions/NetworkService.ApplyServersInterfaceProfiles"
JOB_URI = "JobService/Jobs({job_id})"
LAST_EXEC = "JobService/Jobs({job_id})/LastExecutionDetail"
# User-facing messages.
APPLY_TRIGGERED = "Successfully initiated the apply server profiles job."
NO_STAG = "No profile found for service tag {service_tag}."
CHANGES_MSG = "Changes found to be applied."
NO_CHANGES_MSG = "No changes found to be applied."
VLAN_NOT_FOUND = "The VLAN with a name {vlan_name} not found."
# Fix: corrected typo 'identfiers' -> 'identifiers' in the error message.
DUPLICATE_NIC_IDENTIFIED = "Duplicate NIC identifiers provided."
INVALID_UNTAGGED = "The untagged VLAN {id} provided for the NIC ID {nic_id} is not valid."
NW_OVERLAP = "Network profiles of {service_tag} provided for tagged or untagged VLANs of {nic_id} overlaps."
INVALID_DEV_ST = "Unable to complete the operation because the entered target device service tag(s) '{0}' are invalid."
INVALID_DEV_ID = "Unable to complete the operation because the entered target device ids '{0}' are invalid."
+
+
def get_valid_service_tags(module, rest_obj):
    """Resolve and validate the target devices, returning their service tags.

    Fails the module when duplicate NIC identifiers are supplied or when any
    provided device service tag / device id does not exist on the appliance.

    :param module: AnsibleModule object
    :param rest_obj: OME session object
    :return: list of validated device service tags
    """
    service_tags = []
    nic_configs = module.params.get('nic_configuration')
    if nic_configs:
        nic_ids = [nic.get('nic_identifier') for nic in nic_configs]
        if len(nic_ids) > len(set(nic_ids)):
            module.exit_json(failed=True, msg=DUPLICATE_NIC_IDENTIFIED)
    # dev_map: device id -> service tag for every device on the appliance.
    dev_map = get_rest_items(rest_obj, uri=DEVICE_URI)
    if module.params.get('device_service_tag'):
        cmp_set = set(module.params.get('device_service_tag')) - set(dict(dev_map).values())
        if cmp_set:
            module.exit_json(failed=True, msg=INVALID_DEV_ST.format(",".join(cmp_set)))
        service_tags = list(set(module.params.get('device_service_tag')))
    if module.params.get('device_id'):
        cmp_set = set(module.params.get('device_id')) - set(dict(dev_map).keys())
        if cmp_set:
            module.exit_json(failed=True, msg=INVALID_DEV_ID.format(",".join(map(str, cmp_set))))
        # Renamed loop variable: the original shadowed the builtin ``id``.
        service_tags = [dev_map.get(dev_id) for dev_id in set(module.params.get('device_id'))]
    return service_tags
+
+
def _get_profile(module, rest_obj, stag):
    """Fetch the server profile for service tag *stag*; fail the module if absent.

    Any HTTPError from the GET (e.g. 404) is treated as "no profile exists".
    """
    prof = {}
    try:
        resp = rest_obj.invoke_request("GET", SERVER_PROFILE.format(service_tag=stag))
        prof = resp.json_data
    except HTTPError:
        module.exit_json(failed=True, msg=NO_STAG.format(service_tag=stag))
    return prof
+
+
def _get_interface(module, rest_obj, stag):
    """Fetch the server interface profiles for *stag* as a dict keyed by NIC id.

    Each value holds the NativeVLAN, NicBonded flag, and the set of tagged
    network ids. Fails the module on any HTTPError (no profile for the tag).
    """
    intrfc_dict = {}
    try:
        intrfc = rest_obj.invoke_request("GET", SERVER_INTERFACE.format(service_tag=stag))
        intrfc_list = intrfc.json_data.get("value")
        # Networks is stored as a set of ids for easy diffing later.
        intrfc_dict = dict((sip['Id'], {"NativeVLAN": sip['NativeVLAN'],
                                        "NicBonded": sip["NicBonded"],
                                        "Networks": set([(ntw['Id']) for ntw in sip['Networks']])
                                        }) for sip in intrfc_list)
    except HTTPError:
        module.exit_json(failed=True, msg=NO_STAG.format(service_tag=stag))
    return intrfc_dict
+
+
def get_server_profiles(module, rest_obj, service_tags):
    """Build {service_tag: profile} where each profile embeds its interface map."""
    profile_dict = {}
    for stag in service_tags:
        prof = _get_profile(module, rest_obj, stag)
        intrfc = _get_interface(module, rest_obj, stag)
        prof["ServerInterfaceProfiles"] = intrfc
        # presumably strips @odata.* style metadata keys -- see
        # module_utils.utils.strip_substr_dict; TODO confirm.
        prof = strip_substr_dict(prof)
        profile_dict[stag] = prof
    return profile_dict
+
+
def get_vlan_ids(rest_obj):
    """Return (vlan_map, natives) for all configured networks.

    vlan_map maps VLAN name -> network id.
    natives maps the VLAN number of single-VLAN networks -> network id,
    plus 0 -> 0 so that "no untagged VLAN" is always a valid choice.
    """
    resp = rest_obj.invoke_request("GET", VLANS)
    vlans = resp.json_data.get('value')
    vlan_map = {}
    natives = {}
    for vlan in vlans:
        vlan_map[vlan['Name']] = vlan['Id']
        # Only networks spanning a single VLAN can serve as an untagged VLAN.
        if vlan['VlanMaximum'] == vlan['VlanMinimum']:
            natives[vlan['VlanMaximum']] = vlan['Id']
    natives.update({0: 0})
    return vlan_map, natives
+
+
def compare_profile(template, profile):
    """Count differences between the requested *template* and *profile*.

    NOTE: mutates *profile* in place so that it reflects the requested state
    (apply_diff_key copies differing keys; Networks is recomputed below).
    Returns the number of differences found.
    """
    diff = 0
    diff = diff + apply_diff_key(template, profile, ["BondingTechnology"])
    # bond_tex = profile["BondingTechnology"]
    # ignore_bond = 0 if profile['BondingTechnology'] == 'LACP' else -1
    sip = profile.get('ServerInterfaceProfiles')
    for nic, ntw in sip.items():
        tmp = template.get(nic, {})
        diff = diff + apply_diff_key(tmp, ntw, ["NativeVLAN"])
        diff = diff + apply_diff_key(tmp, ntw, ["NicBonded"])
        untags = ntw.get("Networks")
        # Requested tagged networks = existing + 'present' set - 'absent' set.
        s = set(untags) | set(tmp.get('present', set()))
        s = s - set(tmp.get('absent', set()))
        if s.symmetric_difference(set(untags)):
            ntw["Networks"] = s
            diff = diff + 1
    return diff
+
+
def get_template(module, vlan_dict, natives):
    """Translate module parameters into a per-NIC 'template' of requested state.

    Fails the module when an unknown untagged VLAN id or an unknown tagged
    VLAN name is requested.

    :param module: AnsibleModule object
    :param vlan_dict: VLAN name -> network id (from get_vlan_ids)
    :param natives: valid untagged VLAN number -> network id (from get_vlan_ids)
    :return: dict keyed by nic_identifier plus optional BondingTechnology
    """
    template = {"ServerInterfaceProfiles": {}}
    mparams = module.params
    ignore_teaming = True
    if mparams.get('nic_teaming'):
        template['BondingTechnology'] = mparams.get('nic_teaming')
        # NIC bonding (team) is only applied when the teaming mode is LACP.
        if mparams.get('nic_teaming') != "LACP":
            ignore_teaming = False
    if mparams.get('nic_configuration'):
        for nic in mparams.get('nic_configuration'):
            nic_data = {}
            if nic.get('team') is not None and ignore_teaming:
                nic_data['NicBonded'] = nic.get('team')  # if ignore_teaming else False
            ntvlan = nic.get('untagged_network')
            if ntvlan is not None:
                if ntvlan not in natives:
                    module.exit_json(failed=True, msg=INVALID_UNTAGGED.format(id=ntvlan, nic_id=nic['nic_identifier']),
                                     natives=natives)
                nic_data['NativeVLAN'] = ntvlan
            if nic.get('tagged_networks'):
                tg = nic.get('tagged_networks')
                # Collect network ids under the requested state ('present'/'absent').
                nic_data[tg.get('state')] = set()
                for vlan_name in tg.get('names'):
                    if vlan_name in vlan_dict:
                        nic_data[tg.get('state')].add(vlan_dict[vlan_name])
                    else:
                        module.exit_json(failed=True, msg=VLAN_NOT_FOUND.format(vlan_name=vlan_name))
            template[nic['nic_identifier']] = nic_data
    return template
+
+
def get_payload(module, rest_obj, profile_dict):
    """Compute the ApplyServersInterfaceProfiles payload for changed profiles.

    Exits with NO_CHANGES_MSG when nothing differs, with CHANGES_MSG in check
    mode, and fails when an untagged VLAN also appears as a tagged network.
    """
    vlan_dict, natives = get_vlan_ids(rest_obj)
    template = get_template(module, vlan_dict, natives)
    diff = 0
    payload = []
    for stag, prof in profile_dict.items():
        df = compare_profile(template, prof)
        if df:
            sip_list = []
            for k, v in prof["ServerInterfaceProfiles"].items():
                # The untagged VLAN must not overlap the tagged networks.
                if natives.get(v['NativeVLAN']) in set(v['Networks']):
                    module.exit_json(failed=True, msg=NW_OVERLAP.format(service_tag=stag, nic_id=k))
                # Convert the internal set-based form back to the API shape.
                sips = {"Id": k, "NativeVLAN": v['NativeVLAN'], "NicBonded": v["NicBonded"],
                        "Networks": [({'Id': ntw}) for ntw in v['Networks']]}
                sip_list.append(sips)
            prof["ServerInterfaceProfiles"] = sip_list
            payload.append(prof)
        diff = diff + df
    if not diff:
        module.exit_json(msg=NO_CHANGES_MSG)
    if module.check_mode:
        module.exit_json(msg=CHANGES_MSG, changed=True)  # , payload=payload)
    return payload
+
+
def handle_job(module, rest_obj, job_id):
    """Exit the module reporting job status; optionally wait for completion.

    With job_wait set, tracks the job to completion and reports the job's
    last-execution detail (or its description as a fallback) as the message.
    """
    if module.params.get("job_wait"):
        job_failed, msg, job_dict, wait_time = job_tracking(
            rest_obj, JOB_URI.format(job_id=job_id), max_job_wait_sec=module.params.get('job_wait_timeout'))
        try:
            # Prefer the job's last execution detail as the final message.
            job_resp = rest_obj.invoke_request('GET', LAST_EXEC.format(job_id=job_id))
            msg = job_resp.json_data.get("Value")
            msg = msg.replace('\n', ' ')
        except Exception:
            # Best effort only: fall back to the tracked job description.
            msg = job_dict.get('JobDescription', msg)
        module.exit_json(failed=job_failed, msg=msg, job_id=job_id, changed=True)
    else:
        module.exit_json(changed=True, msg=APPLY_TRIGGERED, job_id=job_id)
+
+
def main():
    """Module entry point: build the argument spec, validate the targets and
    apply the server interface profiles through the OME REST API."""
    specs = {"device_id": {"type": 'list', "elements": 'int'},
             "device_service_tag": {"type": 'list', "elements": 'str'},
             "nic_teaming": {"choices": ['LACP', 'NoTeaming', 'Other']},
             "nic_configuration": {
                 "type": 'list', "elements": 'dict',
                 "options": {
                     "nic_identifier": {"type": 'str', "required": True},
                     "team": {"type": 'bool'},
                     "untagged_network": {"type": 'int'},
                     "tagged_networks": {
                         "type": 'dict', "options": {
                             "state": {"choices": ['present', 'absent'], "default": 'present'},
                             "names": {"type": 'list', "elements": 'str', 'required': True}
                         },
                     }
                 }},
             "job_wait": {"type": 'bool', "default": True},
             "job_wait_timeout": {"type": 'int', "default": 120}}
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        mutually_exclusive=[
            ('device_id', 'device_service_tag',)],
        required_one_of=[('device_id', 'device_service_tag',),
                         ('nic_teaming', 'nic_configuration')],
        supports_check_mode=True)
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            service_tags = get_valid_service_tags(module, rest_obj)
            profiles = get_server_profiles(module, rest_obj, service_tags)
            apply_data = get_payload(module, rest_obj, profiles)
            resp = rest_obj.invoke_request("POST", APPLY_SERVER_PROFILES, data=apply_data)
            jobid = resp.json_data.get("JobId")
            handle_job(module, rest_obj, jobid)
    except HTTPError as err:
        # The HTTPError body is a file-like object holding the JSON error info.
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # An unreachable host is reported as 'unreachable', not 'failed'.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric.py
new file mode 100644
index 000000000..b4cd907eb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric.py
@@ -0,0 +1,735 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_smart_fabric
+short_description: Create, modify or delete a fabric on OpenManage Enterprise Modular
+version_added: "2.1.0"
+description:
+ - This module allows to create a fabric, and modify or delete an existing fabric
+ on OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.omem_auth_options
+options:
+ state:
+ type: str
+ description:
+ - C(present) creates a new fabric or modifies an existing fabric.
+ - C(absent) deletes an existing fabric.
+      - "Notes: The create, modify, or delete fabric operation takes around 15-20 minutes to complete. It is recommended
+        not to start another operation until the current operation is completed."
+ choices: [present, absent]
+ default: present
+ name:
+ required: true
+ type: str
+ description: Provide the I(name) of the fabric to be created, deleted or modified.
+ new_name:
+ type: str
+ description: Provide the I(name) of the fabric to be modified.
+ description:
+ type: str
+ description: Provide a short description of the fabric to be created or modified.
+ fabric_design:
+ type: str
+ description:
+      - "Specify the fabric topology. See the API guide
+        U(https://www.dell.com/support/manuals/en-in/poweredge-mx7000/omem_1_20_10_ug/smartfabric-network-topologies)
+        for more information about the supported topologies."
+ - I(fabric_design) is mandatory for fabric creation.
+ choices: [2xMX5108n_Ethernet_Switches_in_same_chassis,
+ 2xMX9116n_Fabric_Switching_Engines_in_same_chassis,
+ 2xMX9116n_Fabric_Switching_Engines_in_different_chassis]
+ primary_switch_service_tag:
+ type: str
+ description:
+ - Service tag of the first switch.
+ - I(primary_switch_service_tag) is mandatory for fabric creation.
+ - I(primary_switch_service_tag) must belong to the model selected in I(fabric_design).
+ secondary_switch_service_tag:
+ type: str
+ description:
+ - Service tag of the second switch.
+ - I(secondary_switch_service_tag) is mandatory for fabric creation.
+ - I(secondary_switch_service_tag) must belong to the model selected in I(fabric_design).
+ override_LLDP_configuration:
+ type: str
+ description:
+ - Enable this configuration to allow Fabric Management Address to be included in LLDP messages.
+ - "Notes: OpenManage Enterprise Modular 1.0 does not support this option.
+ Some software networking solutions require a single management address to be transmitted by all Ethernet switches
+ to represent the entire fabric. Enable this feature only when connecting to such a solution."
+ choices: ['Enabled', 'Disabled']
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Sajna Shetty(@Sajna-Shetty)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Create a fabric
+ dellemc.openmanage.ome_smart_fabric:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: present
+ name: "fabric1"
+ description: "fabric desc"
+ fabric_design: "2xMX9116n_Fabric_Switching_Engines_in_different_chassis"
+ primary_switch_service_tag: "SVTG123"
+ secondary_switch_service_tag: "PXYT456"
+ override_LLDP_configuration: "Enabled"
+
+- name: Modify a fabric
+ dellemc.openmanage.ome_smart_fabric:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: present
+ name: "fabric1"
+ new_name: "fabric_gold1"
+ description: "new description"
+
+- name: Delete a fabric
+ dellemc.openmanage.ome_smart_fabric:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "absent"
+ name: "fabric1"
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall status of the fabric operation.
+ returned: always
+ sample: "Fabric creation operation is initiated."
+fabric_id:
+ type: str
+  description: Returns the ID when a fabric is created, modified or deleted.
+ returned: success
+ sample: "1312cceb-c3dd-4348-95c1-d8541a17d776"
+additional_info:
+ type: dict
+ description: Additional details of the fabric operation.
+ returned: when I(state=present) and additional information present in response.
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "RelatedProperties": [],
+ "Message": "Fabric update is successful. The OverrideLLDPConfiguration attribute is not provided in the
+ payload, so it preserves the previous value.",
+ "MessageArgs": [],
+ "Severity": "Informational",
+ "Resolution": "Please update the Fabric with the OverrideLLDPConfiguration as Disabled or Enabled if
+ necessary."
+ }
+ ]
+ }
+}
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "RelatedProperties": [],
+ "Message": "Unable to perform operation, because the fabric manager was not reachable.",
+ "MessageArgs": [],
+ "Severity": "Warning",
+ "Resolution": "Make sure of the following and retry the operation: 1) There is at least one advanced
+ I/O Module in power-on mode. For example, MX9116n Ethernet Switch and MX5108n Ethernet Switch. However,
+ if an advanced I/O Module is available in the power-on mode, make sure that the network profile is not
+ set when the fabric manager is in the switch-over mode. 2) If the issue persists, wait for few minutes and retry the operation."
+ }
+ ]
+ }
+}
+'''
+
+import json
+import socket
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ssl import SSLError
+
# REST resource URIs (relative to the OME-Modular API base).
FABRIC_URI = "NetworkService/Fabrics"
FABRIC_ID_URI = "NetworkService/Fabrics('{fabric_id}')"
DOMAIN_URI = "ManagementDomainService/Domains"
DEVICE_URI = "DeviceService/Devices"

# Software inventory of the lead chassis; used to read the MSM version.
MSM_URI = "DeviceService/Devices({lead_chassis_device_id})/InventoryDetails('deviceSoftware')"

# User-facing status and error messages.
CHECK_MODE_CHANGE_FOUND_MSG = "Changes found to be applied."
CHECK_MODE_CHANGE_NOT_FOUND_MSG = "No Changes found to be applied."
FABRIC_NOT_FOUND_ERROR_MSG = "The smart fabric '{0}' is not present in the system."
DOMAIN_SERVICE_TAG_ERROR_MSG = "Unable to retrieve the domain information because the" \
                               " domain of the provided service tag {0} is not available."
LEAD_CHASSIS_ERROR_MSG = "System should be a lead chassis if the assigned fabric topology type is {0}."
SYSTEM_NOT_SUPPORTED_ERROR_MSG = "Fabric management is not supported on the specified system."
DESIGN_MODEL_ERROR_MSG = "The network type of the {0} must be {1}."
DEVICE_SERVICE_TAG_TYPE_ERROR_MSG = "The {0} type must be {1}."
DEVICE_SERVICE_TAG_NOT_FOUND_ERROR_MSG = "Unable to retrieve the device information because the device" \
                                         " with the provided service tag {0} is not available."
IDEMPOTENCY_MSG = "Specified fabric details are the same as the existing settings."
REQUIRED_FIELD = "Options 'fabric_design', 'primary_switch_service_tag' and 'secondary_switch_service_tag'" \
                 " are required for fabric creation."
DUPLICATE_TAGS = "The switch details of the primary switch overlaps with the secondary switch details."
PRIMARY_SWITCH_OVERLAP_MSG = "The primary switch service tag is overlapping with existing secondary switch details."
SECONDARY_SWITCH_OVERLAP_MSG = "The switch details of the secondary switch overlaps with the existing primary" \
                               " switch details."
+
+
def get_service_tag_with_fqdn(rest_obj, module):
    """Return the service tag of the device whose DNS name or network address
    matches the configured hostname, or None when no device matches.

    :param rest_obj: session object
    :param module: Ansible module object
    """
    hostname = module.params["hostname"]
    service_tag = None
    device_details = rest_obj.get_all_items_with_pagination(DEVICE_URI)
    for each_device in device_details["value"]:
        for item in each_device["DeviceManagement"]:
            if item.get("DnsName") == hostname or item.get('NetworkAddress') == hostname:
                return each_device["DeviceServiceTag"]
    return service_tag
+
+
def validate_lead_msm_version(each_domain, module, fabric_design=None):
    """Return the MSM version of *each_domain*.

    Fails the module when the multi-chassis fabric design is requested but
    this chassis is not the LEAD of the management domain.

    :param each_domain: domain record from ManagementDomainService
    :param module: Ansible module object
    :param fabric_design: requested fabric design name, if any
    :return: MSM version string of the domain
    """
    needs_lead = (fabric_design == "2xMX9116n_Fabric_Switching_Engines_in_different_chassis")
    if needs_lead and each_domain["DomainRoleTypeValue"].upper() != "LEAD":
        module.fail_json(msg=LEAD_CHASSIS_ERROR_MSG.format(fabric_design))
    return each_domain["Version"]
+
+
def get_ip_from_host(hostname):
    """Resolve *hostname* to an IP address, returning the input unchanged on failure.

    Workaround: when a Virtual-IP DNS name is used as the hostname, the device
    inventory reports the original IP's DNS name instead, so the service-tag
    lookup by name fails.  Resolving the VIP DNS name to its IP avoids that
    mismatch.
    """
    try:
        result = socket.getaddrinfo(hostname, None)
        # Each entry is (family, type, proto, canonname, sockaddr);
        # sockaddr[0] is the IP address string.
        ip_address = result[-1][-1][0]
        if ip_address:
            return ip_address
    except Exception:
        # socket.gaierror on resolution failure, IndexError on an empty
        # result, etc. -- the original code handled all of these identically,
        # so a single handler replaces the two redundant except clauses.
        pass
    return hostname
+
+
def get_msm_device_details(rest_obj, module):
    """
    Get msm details
    :param rest_obj: session object
    :param module: Ansible module object
    :return: tuple
        1st item: service tag of the domain
        2nd item: msm version of ome-M device
    """
    hostname = get_ip_from_host(module.params["hostname"])
    fabric_design = module.params.get("fabric_design")
    msm_version = ""
    service_tag = get_service_tag_with_fqdn(rest_obj, module)
    domain_details = rest_obj.get_all_items_with_pagination(DOMAIN_URI)
    # Match the domain either by the resolved service tag or by the hostname
    # appearing in the domain's public addresses.
    for each_domain in domain_details["value"]:
        if service_tag and service_tag == each_domain["Identifier"]:
            msm_version = validate_lead_msm_version(each_domain, module, fabric_design)
            break
        if hostname in each_domain["PublicAddress"]:
            msm_version = validate_lead_msm_version(each_domain, module, fabric_design)
            service_tag = each_domain["Identifier"]
            break
    else:
        # for/else: no domain matched at all -> fabric management is not
        # available on this system.
        module.fail_json(msg=SYSTEM_NOT_SUPPORTED_ERROR_MSG)
    return service_tag, msm_version
+
+
def compare_payloads(modify_payload, current_payload):
    """Return True when *modify_payload* differs from *current_payload*.

    Nested dictionaries are compared recursively.  A missing key or a ``None``
    value on the current side counts as a difference; keys present only in
    *current_payload* are ignored.
    """
    for key, requested in modify_payload.items():
        existing = None if current_payload is None else current_payload.get(key)
        if existing is None:
            return True
        if isinstance(requested, dict):
            if compare_payloads(requested, existing):
                return True
        elif requested != existing:
            return True
    return False
+
+
def idempotency_check_for_state_present(fabric_id, current_payload, expected_payload, module):
    """
    idempotency check in case of state present
    :param fabric_id: fabric id (None when creating)
    :param current_payload: already existing payload for specified fabric
    :param expected_payload: payload built from the module parameters
    :param module: ansible module object
    :return: None (may exit the module)
    """
    if fabric_id:
        exp_dict = expected_payload.copy()
        cur_dict = current_payload.copy()
        # Flatten FabricDesignMapping into DesignNode -> PhysicalNode keys so
        # list ordering does not affect the comparison.
        for d in (exp_dict, cur_dict):
            fab_dz_lst = d.pop("FabricDesignMapping", [])
            for fab in fab_dz_lst:
                d[fab.get('DesignNode')] = fab.get('PhysicalNode')
        payload_diff = compare_payloads(exp_dict, cur_dict)
        if module.check_mode:
            if payload_diff:
                module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True)
            else:
                module.exit_json(msg=CHECK_MODE_CHANGE_NOT_FOUND_MSG, changed=False)
        elif not payload_diff:
            module.exit_json(msg=IDEMPOTENCY_MSG, changed=False)
    else:
        # A create is always a change; report it in check mode.
        if module.check_mode:
            module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True)
+
+
def design_node_dict_update(design_node_map):
    """Flatten a FabricDesignMapping list into a single-level dict.

    'Switch-A' entries become 'PhysicalNode1' and 'Switch-B' entries become
    'PhysicalNode2'; entries without a PhysicalNode value are skipped.

    :param design_node_map: design node map content
    :return: dict
    """
    flattened = {}
    node_keys = {"Switch-A": "PhysicalNode1", "Switch-B": "PhysicalNode2"}
    for entry in design_node_map:
        target = node_keys.get(entry["DesignNode"])
        if target and entry.get('PhysicalNode'):
            flattened[target] = entry['PhysicalNode']
    return flattened
+
+
def validate_switches_overlap(current_dict, modify_dict, module):
    """
    Validation in case of modify operation when the user-provided switch
    details overlap the current fabric settings.
    :param current_dict: current payload of specified fabric (flattened design map)
    :param modify_dict: modify payload created (flattened design map)
    :param module: Ansible module object
    """
    modify_primary_switch = modify_dict.get("PhysicalNode1")
    current_secondary_switch = current_dict.get("PhysicalNode2")
    modify_secondary_switch = modify_dict.get("PhysicalNode2")
    current_primary_switch = current_dict.get("PhysicalNode1")
    if modify_primary_switch and current_primary_switch != modify_primary_switch:
        module.fail_json(msg="The modify operation does not support primary_switch_service_tag update.")
    if modify_secondary_switch and current_secondary_switch != modify_secondary_switch:
        module.fail_json(msg="The modify operation does not support secondary_switch_service_tag update.")
    # A complete primary <-> secondary swap is tolerated; any other reuse of
    # the opposite switch is an overlap error.
    flag = all([modify_primary_switch, modify_secondary_switch, current_primary_switch,
                current_secondary_switch]) and (modify_primary_switch == current_secondary_switch and
                                                modify_secondary_switch == current_primary_switch)
    if not flag and modify_primary_switch is not None and current_secondary_switch is not None and \
            modify_primary_switch == current_secondary_switch:
        # Fixed: the message was passed positionally; fail_json takes msg=.
        module.fail_json(msg=PRIMARY_SWITCH_OVERLAP_MSG)
    if not flag and modify_secondary_switch is not None and current_primary_switch is not None and \
            modify_secondary_switch == current_primary_switch:
        module.fail_json(msg=SECONDARY_SWITCH_OVERLAP_MSG)
+
+
def fabric_design_map_payload_creation(design_map_modify_payload, design_map_current_payload, module):
    """
    process FabricDesignMapping contents
    :param design_map_modify_payload: modify payload created
    :param design_map_current_payload: current payload of specified fabric
    :param module: Ansible module object
    :return: list of {'DesignNode': ..., 'PhysicalNode': ...} entries with the
        requested values overlaid on the current ones
    """
    modify_dict = design_node_dict_update(design_map_modify_payload)
    current_dict = design_node_dict_update(design_map_current_payload)
    validate_switches_overlap(current_dict, modify_dict, module)
    # Requested values take precedence over current ones.
    current_dict.update(modify_dict)
    design_list = []
    for key, val in current_dict.items():
        # Only 'PhysicalNode1'/'PhysicalNode2' can be present here; anything
        # that is not the primary maps back to Switch-B.
        if key == "PhysicalNode1":
            design_list.append({'DesignNode': 'Switch-A', 'PhysicalNode': val})
        else:
            design_list.append({'DesignNode': 'Switch-B', 'PhysicalNode': val})
    return design_list
+
+
def merge_payload(modify_payload, current_payload, module):
    """
    Overlay the requested changes onto the existing fabric payload.
    :param modify_payload: payload created to update existing setting
    :param current_payload: already existing payload for specified fabric
    :param module: Ansible module object
    :return: dict - current payload with the requested changes applied,
        including merged FabricDesign and FabricDesignMapping entries
    """
    _current_payload = dict(current_payload)
    _current_payload.update(modify_payload)
    # FabricDesign needs a nested merge rather than wholesale replacement.
    if modify_payload.get("FabricDesign") and current_payload.get("FabricDesign"):
        _current_payload["FabricDesign"].update(modify_payload["FabricDesign"])
    elif modify_payload.get("FabricDesign") and not current_payload.get("FabricDesign"):
        _current_payload["FabricDesign"] = modify_payload["FabricDesign"]
    fabric_design_map_list = fabric_design_map_payload_creation(modify_payload.get("FabricDesignMapping", []),
                                                                current_payload.get("FabricDesignMapping", []), module)
    if fabric_design_map_list:
        _current_payload.update({"FabricDesignMapping": fabric_design_map_list})
    return _current_payload
+
+
def get_fabric_design(fabric_design_uri, rest_obj):
    """
    Get the fabric design name from the fabric design uri which is returned from GET request
    :param fabric_design_uri: fabric design uri (absolute, contains '/api/')
    :param rest_obj: session object
    :return: dict - {"Name": <design name>} or {} when no uri is given
    """
    fabric_design = {}
    if fabric_design_uri:
        # The stored uri is absolute; invoke_request wants it relative to /api/.
        resp = rest_obj.invoke_request("GET", fabric_design_uri.split('/api/')[-1])
        design_type = resp.json_data.get("Name")
        fabric_design = {"Name": design_type}
    return fabric_design
+
+
def get_current_payload(fabric_details, rest_obj):
    """
    extract payload from existing fabric details, which is
    obtained from GET request of existing fabric, to match with payload created
    :param fabric_details: dict - specified fabric details
    :param rest_obj: session object
    :return: dict with falsy values removed
    """
    # Drop OverrideLLDPConfiguration values other than Enabled/Disabled
    # (e.g. 'NA') so they do not produce spurious diffs.
    if fabric_details.get("OverrideLLDPConfiguration") and fabric_details.get("OverrideLLDPConfiguration") not in \
            ["Enabled", "Disabled"]:
        fabric_details.pop("OverrideLLDPConfiguration", None)
    payload = {
        "Id": fabric_details["Id"],
        "Name": fabric_details["Name"],
        "Description": fabric_details.get("Description"),
        "OverrideLLDPConfiguration": fabric_details.get("OverrideLLDPConfiguration"),
        "FabricDesignMapping": fabric_details.get("FabricDesignMapping", []),
        "FabricDesign": get_fabric_design(fabric_details["FabricDesign"].get("@odata.id"), rest_obj)

    }
    return dict([(k, v) for k, v in payload.items() if v])
+
+
def create_modify_payload(module_params, fabric_id, msm_version):
    """Build the fabric create/modify request body from module parameters.

    :param module_params: ansible module parameters
    :param fabric_id: fabric id (set for modify, None for create)
    :param msm_version: MSM version string of the chassis
    :return: dict payload with falsy entries removed
    """
    params = {k: v for k, v in module_params.items() if v}
    design_mapping = []
    if params.get("primary_switch_service_tag"):
        design_mapping.append({"DesignNode": "Switch-A",
                               "PhysicalNode": params["primary_switch_service_tag"]})
    if params.get("secondary_switch_service_tag"):
        design_mapping.append({"DesignNode": "Switch-B",
                               "PhysicalNode": params["secondary_switch_service_tag"]})
    draft = {
        "Name": params["name"],
        "Description": params.get("description"),
        "OverrideLLDPConfiguration": params.get("override_LLDP_configuration"),
        "FabricDesignMapping": design_mapping,
        "FabricDesign": {"Name": params["fabric_design"]} if params.get("fabric_design") else {},
    }
    # The OverrideLLDPConfiguration attribute is not supported in MSM 1.0.
    if msm_version.startswith("1.0"):
        draft.pop("OverrideLLDPConfiguration", None)
    if fabric_id:
        # Modify: honour a requested rename and carry the fabric id.
        draft["Name"] = params.get("new_name", params["name"])
        draft["Id"] = fabric_id
    return {k: v for k, v in draft.items() if v}
+
+
def get_fabric_id_details(name, all_fabrics):
    """Look up a fabric by name.

    :param name: fabric name
    :param all_fabrics: all fabrics available in the system
    :return: tuple of (fabric id, full fabric dict), or (None, None) when no
        fabric has the given name
    """
    match = next((fabric for fabric in all_fabrics if fabric["Name"] == name), None)
    if match is None:
        return None, None
    return match["Id"], match
+
+
def validate_device_type(device_type_name, identifier, device_details, module):
    """
    Validation for iom and chassis device type and also design modes of model
    :param device_type_name: device type name eg: NETWORK_IOM, CHASSIS
    :param identifier: identifier to access device type name
    :param device_details: all details of device
    :param module: ansible module object
    :return: None (fails the module on mismatch)
    """
    device_map = {
        "primary_switch_service_tag": "NETWORK_IOM",
        "secondary_switch_service_tag": "NETWORK_IOM",
        "hostname": "CHASSIS"
    }
    design_mode = module.params.get("fabric_design")
    if device_type_name != device_map[identifier]:
        module.fail_json(
            msg=DEVICE_SERVICE_TAG_TYPE_ERROR_MSG.format(identifier, device_map[identifier]))
    if device_type_name != "CHASSIS" and design_mode:
        # e.g. "2xMX9116n_Fabric_Switching_Engines_..." -> model "MX9116n";
        # the switch model must match the model named in the fabric design.
        design_model = design_mode.split("_")[0].split('2x')[-1]
        identifier_model = device_details["Model"]
        if design_model not in identifier_model:
            module.fail_json(
                msg=DESIGN_MODEL_ERROR_MSG.format(identifier, design_model))
+
+
def validate_service_tag(device_service_tag, identifier, device_type_map, rest_obj, module):
    """
    Validate the service tag and device type of device
    :param device_service_tag: device service tag (validation is skipped when None)
    :param identifier: which module option the tag came from:
        primary_switch_service_tag, secondary_switch_service_tag or hostname
    :param device_type_map: map of device type id -> device type name
    :param rest_obj: session object
    :param module: ansible module object
    :return: None (fails the module on unknown tag or wrong device type)
    """
    if device_service_tag is not None:
        device_id_details = rest_obj.get_device_id_from_service_tag(device_service_tag)
        device_details = device_id_details["value"]
        if device_id_details["Id"] is None:
            module.fail_json(msg=DEVICE_SERVICE_TAG_NOT_FOUND_ERROR_MSG.format(device_service_tag))
        identifier_device_type = device_details["Type"]
        validate_device_type(device_type_map[identifier_device_type], identifier, device_details, module)
+
+
def validate_devices(host_service_tag, rest_obj, module):
    """
    validate domain, primary switch and secondary switch devices
    :param host_service_tag: service tag of the hostname provided
    :param rest_obj: session object
    :param module: Ansible module object
    :return: None (fails the module on any invalid device)
    """
    primary = module.params.get("primary_switch_service_tag")
    secondary = module.params.get("secondary_switch_service_tag")
    device_type_map = rest_obj.get_device_type()
    # The chassis itself plus both fabric switches are validated the same way.
    validate_service_tag(host_service_tag, "hostname", device_type_map, rest_obj, module)
    validate_service_tag(primary,
                         "primary_switch_service_tag",
                         device_type_map, rest_obj, module)
    validate_service_tag(secondary,
                         "secondary_switch_service_tag",
                         device_type_map, rest_obj,
                         module)
+
+
def required_field_check_for_create(fabric_id, module):
    """Fail when creating a fabric (no existing id) without the mandatory
    fabric_design and both switch service tag options."""
    params = module.params
    if not fabric_id and not all([params.get("fabric_design"), params.get("primary_switch_service_tag"),
                                  params.get("secondary_switch_service_tag")]):
        module.fail_json(msg=REQUIRED_FIELD)
+
+
def process_output(name, fabric_resp, msg, fabric_id, rest_obj, module):
    """
    fabric management actions creation/update of smart fabric output details processing
    :param name: fabric name specified
    :param fabric_resp: json response from ome
    :param msg: specific message of create and modify operation
    :param fabric_id: fabric id in case of modify
    :param rest_obj: current session object
    :param module: Ansible module object
    :return: None (always exits the module)
    """
    identifier = fabric_resp
    if fabric_id:
        identifier = fabric_id
    if isinstance(fabric_resp, dict):
        # OME returned a body (extended info) instead of a bare id: look the
        # fabric id up again by name and surface the body as additional_info.
        all_fabrics = rest_obj.get_all_items_with_pagination(FABRIC_URI)["value"]
        identifier, current_fabric_details = get_fabric_id_details(name, all_fabrics)
        if not identifier:
            identifier = ""
        module.exit_json(msg=msg, fabric_id=identifier, additional_info=fabric_resp, changed=True)
    module.exit_json(msg=msg, fabric_id=identifier, changed=True)
+
+
def validate_modify(module, current_payload):
    """Fabric modification does not support fabric design type modification;
    fail when the requested design differs from the fabric's current one."""
    if module.params.get("fabric_design") and current_payload["FabricDesign"]["Name"] and \
            (module.params.get("fabric_design") != current_payload["FabricDesign"]["Name"]):
        module.fail_json(msg="The modify operation does not support fabric_design update.")
+
+
def create_modify_fabric(name, all_fabric, rest_obj, module):
    """
    fabric management actions creation/update of smart fabric
    :param name: fabric name specified
    :param all_fabric: all available fabrics in system
    :param rest_obj: current session object
    :param module: ansible module object
    :return: None (exits the module via process_output or idempotency check)
    """
    fabric_id, current_fabric_details = get_fabric_id_details(name, all_fabric)
    required_field_check_for_create(fabric_id, module)
    host_service_tag, msm_version = get_msm_device_details(rest_obj, module)
    validate_devices(host_service_tag, rest_obj, module)
    uri = FABRIC_URI
    expected_payload = create_modify_payload(module.params, fabric_id, msm_version)
    payload = dict(expected_payload)
    method = "POST"
    msg = "Fabric creation operation is initiated."
    current_payload = {}
    if fabric_id:
        # Existing fabric: switch to PUT on the fabric id and overlay the
        # requested changes onto the current settings.
        current_payload = get_current_payload(current_fabric_details, rest_obj)
        validate_modify(module, current_payload)
        method = "PUT"
        msg = "Fabric modification operation is initiated."
        uri = FABRIC_ID_URI.format(fabric_id=fabric_id)
        payload = merge_payload(expected_payload, current_payload, module)
    idempotency_check_for_state_present(fabric_id, current_payload, expected_payload, module)
    resp = rest_obj.invoke_request(method, uri, data=payload)
    fabric_resp = resp.json_data
    process_output(name, fabric_resp, msg, fabric_id, rest_obj, module)
+
+
def check_fabric_exits_for_state_absent(fabric_values, module, fabric_name):
    """
    idempotency check in case of state absent
    :param fabric_values: fabric details of existing fabric
    :param module: ansible module object
    :param fabric_name: fabric name
    :return: str - fabric id
    """
    fabric_id, _details = get_fabric_id_details(fabric_name, fabric_values)
    if module.check_mode:
        # Check mode only reports whether a deletion would happen.
        if fabric_id is None:
            module.exit_json(msg=CHECK_MODE_CHANGE_NOT_FOUND_MSG)
        module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True)
    if fabric_id is None:
        module.exit_json(msg=FABRIC_NOT_FOUND_ERROR_MSG.format(fabric_name))
    return fabric_id
+
+
def delete_fabric(all_fabrics, rest_obj, module, name):
    """
    Delete the named fabric after the state-absent idempotency checks pass.

    :param all_fabrics: All available fabric in system
    :param rest_obj: session object
    :param module: ansible module object
    :param name: fabric name specified
    :return: None
    """
    target_id = check_fabric_exits_for_state_absent(all_fabrics, module, name)
    rest_obj.invoke_request("DELETE", FABRIC_ID_URI.format(fabric_id=target_id))
    module.exit_json(msg="Fabric deletion operation is initiated.", fabric_id=target_id, changed=True)
+
+
def fabric_actions(rest_obj, module):
    """
    Dispatch the requested fabric operation based on the ``state`` parameter.

    :param rest_obj: session object
    :param module: ansible module object
    :return: None
    """
    fabric_name = module.params["name"]
    all_fabrics = rest_obj.get_all_items_with_pagination(FABRIC_URI)["value"]
    if module.params["state"] == "present":
        create_modify_fabric(fabric_name, all_fabrics, rest_obj, module)
    else:
        delete_fabric(all_fabrics, rest_obj, module, fabric_name)
+
+
def main():
    """Module entry point: build the argument spec and run the fabric actions."""
    design_choices = ['2xMX5108n_Ethernet_Switches_in_same_chassis',
                      '2xMX9116n_Fabric_Switching_Engines_in_same_chassis',
                      '2xMX9116n_Fabric_Switching_Engines_in_different_chassis'
                      ]
    specs = {
        "state": {"type": "str", "required": False, "default": "present", "choices": ['present', 'absent']},
        "name": {"required": True, "type": "str"},
        "new_name": {"required": False, "type": "str"},
        "description": {"required": False, "type": "str"},
        "fabric_design": {"required": False, "type": "str",
                          "choices": design_choices},
        "primary_switch_service_tag": {"required": False, "type": "str"},
        "secondary_switch_service_tag": {"required": False, "type": "str"},
        "override_LLDP_configuration": {"required": False, "type": "str", "choices": ['Enabled', 'Disabled']},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        # state=present requires at least one of the listed create/modify options.
        required_if=[['state', 'present', ('new_name', 'description', 'fabric_design', 'primary_switch_service_tag',
                                           'secondary_switch_service_tag', 'override_LLDP_configuration',), True]],
        supports_check_mode=True
    )
    try:
        # req_session=True keeps one authenticated OME session for all calls.
        with RestOME(module.params, req_session=True) as rest_obj:
            fabric_actions(rest_obj, module)
    except HTTPError as err:
        # HTTP 501 from OME-M indicates smart fabric is not supported here.
        if err.code == 501:
            module.fail_json(msg=SYSTEM_NOT_SUPPORTED_ERROR_MSG, error_info=json.load(err))
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Connectivity problems are reported as unreachable rather than failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py
new file mode 100644
index 000000000..cae5d8d69
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py
@@ -0,0 +1,544 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.3.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_smart_fabric_uplink
+short_description: Create, modify or delete an uplink for a fabric on OpenManage Enterprise Modular
+version_added: "2.1.0"
+description: This module allows to create, modify or delete an uplink for a fabric.
+extends_documentation_fragment:
+ - dellemc.openmanage.omem_auth_options
+options:
+ state:
+ description:
+ - C(present)
+ - Creates a new uplink with the provided I(name).
+ - Modifies an existing uplink with the provided I(name).
+ - C(absent) – Deletes the uplink with the provided I(name).
+ - I(WARNING) Delete operation can impact the network infrastructure.
+ choices: [present, absent]
+ default: present
+ type: str
+ fabric_name:
+ type: str
+ description: Provide the I(fabric_name) of the fabric for which the uplink is to be configured.
+ required: true
+ name:
+ type: str
+ description: Provide the I(name) of the uplink to be created, modified or deleted.
+ required: true
+ new_name:
+ type: str
+    description: Provide the new name for the uplink.
+ description:
+ type: str
+ description: Provide a short description for the uplink to be created or modified.
+ uplink_type:
+ description:
+ - Specify the uplink type.
+ - I(NOTE) The uplink type cannot be changed for an existing uplink.
+ choices: ['Ethernet', 'FCoE', 'FC Gateway', 'FC Direct Attach', 'Ethernet - No Spanning Tree']
+ type: str
+ ufd_enable:
+ description:
+ - "Add or Remove the uplink to the Uplink Failure Detection (UFD) group. The UFD group identifies the loss of
+ connectivity to the upstream switch and notifies the servers that are connected to the switch. During an uplink
+ failure, the switch disables the corresponding downstream server ports. The downstream servers can then select
+ alternate connectivity routes, if available."
+ - "I(WARNING) The firmware version of the I/O Module running the Fabric Manager must support this configuration
+ feature. If not, uplink creation will be successful with an appropriate error message in response."
+ choices: ['Enabled', 'Disabled']
+ type: str
+ primary_switch_service_tag:
+ description: Service tag of the primary switch.
+ type: str
+ primary_switch_ports:
+ description:
+ - The IOM slots to be connected to the primary switch.
+ - I(primary_switch_service_tag) is mandatory for this option.
+ type: list
+ elements: str
+ secondary_switch_service_tag:
+ description: Service tag of the secondary switch.
+ type: str
+ secondary_switch_ports:
+ description:
+ - The IOM slots to be connected to the secondary switch.
+ - I(secondary_switch_service_tag) is mandatory for this option.
+ type: list
+ elements: str
+ tagged_networks:
+ description: VLANs to be associated with the uplink I(name).
+ type: list
+ elements: str
+ untagged_network:
+ description: Specify the name of the VLAN to be added as untagged to the uplink.
+ type: str
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Jagadeesh N V(@jagadeeshnv)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Create an Uplink
+ dellemc.openmanage.ome_smart_fabric_uplink:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "present"
+ fabric_name: "fabric1"
+ name: "uplink1"
+ description: "CREATED from OMAM"
+ uplink_type: "Ethernet"
+ ufd_enable: "Enabled"
+ primary_switch_service_tag: "ABC1234"
+ primary_switch_ports:
+ - ethernet1/1/13
+ - ethernet1/1/14
+ secondary_switch_service_tag: "XYZ1234"
+ secondary_switch_ports:
+ - ethernet1/1/13
+ - ethernet1/1/14
+ tagged_networks:
+ - vlan1
+ - vlan3
+ untagged_network: vlan2
+ tags: create_uplink
+
+- name: Modify an existing uplink
+ dellemc.openmanage.ome_smart_fabric_uplink:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "present"
+ fabric_name: "fabric1"
+ name: "uplink1"
+ new_name: "uplink2"
+ description: "Modified from OMAM"
+ uplink_type: "Ethernet"
+ ufd_enable: "Disabled"
+ primary_switch_service_tag: "DEF1234"
+ primary_switch_ports:
+ - ethernet1/2/13
+ - ethernet1/2/14
+ secondary_switch_service_tag: "TUV1234"
+ secondary_switch_ports:
+ - ethernet1/2/13
+ - ethernet1/2/14
+ tagged_networks:
+ - vlan11
+ - vlan33
+ untagged_network: vlan22
+ tags: modify_uplink
+
+- name: Delete an Uplink
+ dellemc.openmanage.ome_smart_fabric_uplink:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "absent"
+ fabric_name: "fabric1"
+ name: "uplink1"
+ tags: delete_uplink
+
+- name: Modify an Uplink name
+ dellemc.openmanage.ome_smart_fabric_uplink:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "present"
+ fabric_name: "fabric1"
+ name: "uplink1"
+ new_name: "uplink2"
+ tags: modify_uplink_name
+
+- name: Modify Uplink ports
+ dellemc.openmanage.ome_smart_fabric_uplink:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "present"
+ fabric_name: "fabric1"
+ name: "uplink1"
+ description: "uplink ports modified"
+ primary_switch_service_tag: "ABC1234"
+ primary_switch_ports:
+ - ethernet1/1/6
+ - ethernet1/1/7
+ secondary_switch_service_tag: "XYZ1234"
+ secondary_switch_ports:
+ - ethernet1/1/9
+ - ethernet1/1/10
+ tags: modify_ports
+
+- name: Modify Uplink networks
+ dellemc.openmanage.ome_smart_fabric_uplink:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "present"
+ fabric_name: "fabric1"
+ name: "create1"
+ description: "uplink networks modified"
+ tagged_networks:
+ - vlan4
+ tags: modify_networks
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall status of the uplink operation.
+ returned: always
+ sample: "Successfully modified the uplink."
+uplink_id:
+ type: str
+ description: Returns the ID when an uplink is created or modified.
+ returned: when I(state=present)
+ sample: "ddc3d260-fd71-46a1-97f9-708e12345678"
+additional_info:
+ type: dict
+ description: Additional details of the fabric operation.
+ returned: when I(state=present) and additional information present in response.
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to configure the Uplink Failure Detection mode on the uplink because the firmware
+ version of the I/O Module running the Fabric Manager does not support the configuration feature.",
+ "MessageArgs": [],
+ "MessageId": "CDEV7151",
+ "RelatedProperties": [],
+ "Resolution": "Update the firmware version of the I/O Module running the Fabric Manager and retry
+ the operation. For information about the recommended I/O Module firmware versions, see the
+ OpenManage Enterprise-Modular User's Guide available on the support site.",
+ "Severity": "Informational"
+ }
+ ],
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information."
+ }
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1006",
+ "RelatedProperties": [],
+ "Message": "Unable to complete the request because the resource URI does not exist or is not implemented.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide
+ for more information about resource URI and its properties."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.common.dict_transformations import recursive_diff
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import get_item_and_list
+
# OME-M REST endpoints (relative to the API base) used by this module.
FABRIC_URI = "NetworkService/Fabrics"
UPLINKS_URI = "NetworkService/Fabrics('{fabric_id}')/Uplinks"
UPLINK_URI = "NetworkService/Fabrics('{fabric_id}')/Uplinks('{uplink_id}')"
APPLICABLE_NETWORKS = "NetworkService/Fabrics('{fabric_id}')/NetworkService.GetApplicableUplinkNetworks"
APPLICABLE_UNTAGGED = "NetworkService/Fabrics('{fabric_id}')/NetworkService.GetApplicableUplinkUntaggedNetworks"
# Device type 4000 filters the inventory down to I/O modules (IOMs).
IOM_DEVICES = "DeviceService/Devices?$filter=Type%20eq%204000"
PORT_INFO = "DeviceService/Devices({device_id})/InventoryDetails('portInformation')"
MEDIA_TYPES = "NetworkService/UplinkTypes"
VLAN_CONFIG = "NetworkConfigurationService/Networks"
# Messages
CHECK_MODE_MSG = "Changes found to be applied."
NO_CHANGES_MSG = "No changes found to be applied to the uplink configuration."
SAME_SERVICE_TAG_MSG = "Primary and Secondary service tags must not be the same."
+
+
def get_item_id(rest_obj, name, uri, key='Name', attr='Id', value='value'):
    """Return ``(attr_value, items)`` for the first item whose *key* equals *name*.

    Returns ``(0, items)`` when no item matches, so callers can use a simple
    truthiness test on the first element.
    """
    response = rest_obj.invoke_request('GET', uri)
    items = []
    if response.success and response.json_data.get(value):
        items = response.json_data.get(value, [])
    match = next((item for item in items if item.get(key, "") == name), None)
    if match is not None:
        return match.get(attr), items
    return 0, items
+
+
def get_all_uplink_ports(uplinks):
    """Collect the port entries of every uplink into one flat list.

    :param uplinks: list of uplink dicts as returned by OME; each is expected
        to carry a "Ports" list, but the key may be absent or null.
    :return: flat list of all port entries across the given uplinks.
    """
    portlist = []
    for uplink in uplinks:
        # `or []` guards against a missing "Ports" key or a null value, which
        # previously raised TypeError during list concatenation.
        portlist.extend(uplink.get("Ports") or [])
    return portlist
+
+
def validate_ioms(module, rest_obj, uplinks):
    """Validate the requested switch ports and return them as payload entries.

    Builds "<service_tag>:<port>" identifiers for the primary and secondary
    switches, failing the module when a service tag is unknown, a requested
    port is not in the IOM's port inventory, or a port is already used by
    another uplink of this fabric.
    """
    # Ports already claimed by the fabric's existing uplinks.
    uplinkports = get_all_uplink_ports(uplinks)
    payload_ports = []
    occupied_ports = []
    used_ports = []
    for idx in uplinkports:
        used_ports.append(idx["Id"])
    iomsts = ("primary", "secondary")
    for iom in iomsts:
        prim_st = module.params.get(iom + "_switch_service_tag")
        if prim_st:
            prim_ports = list(str(port).strip() for port in module.params.get(iom + "_switch_ports"))
            # NOTE: `id` shadows the builtin here; it holds the IOM device id.
            id, ioms = get_item_id(rest_obj, prim_st, IOM_DEVICES, key="DeviceServiceTag")
            if not id:
                module.fail_json(msg="Device with service tag {0} does not exist.".format(prim_st))
            resp = rest_obj.invoke_request("GET", PORT_INFO.format(device_id=id))
            port_info_data = resp.json_data.get("InventoryInfo", [])
            port_info_list = []
            # Breakout ports are reported as SubPorts of a parent port entry.
            for port in port_info_data:
                if port.get("SubPorts"):
                    for subport in port.get("SubPorts"):
                        port_info_list.append(subport["PortNumber"])
                else:
                    port_info_list.append(port["PortNumber"])
            # All ports are listed but with "OpticsType": "NotPresent" are shown on UI.
            non_exist_ports = []
            for port in prim_ports:
                if port not in port_info_list:
                    non_exist_ports.append(port)
                st_port = prim_st + ':' + port
                payload_ports.append(st_port)
                if st_port in used_ports:
                    occupied_ports.append(st_port)
            # Fail per switch on unknown ports; occupancy is checked at the end.
            if non_exist_ports:
                module.fail_json(msg="{0} Port Numbers {1} does not exist for IOM {2}."
                                 .format(iom, (",".join(set(non_exist_ports))), prim_st))
    if occupied_ports:
        module.fail_json(msg="Ports {0} are already occupied.".format(",".join(set(occupied_ports))))
    return payload_ports
+
+
def validate_networks(module, rest_obj, fabric_id, media_id):
    """Resolve the requested tagged network names to VLAN IDs valid for the uplink type."""
    response = rest_obj.invoke_request('POST', APPLICABLE_NETWORKS.format(fabric_id=fabric_id),
                                       data={"UplinkType": media_id})
    applicable = response.json_data.get('ApplicableUplinkNetworks') or []
    name_to_id = {vlan["Name"]: vlan["Id"] for vlan in applicable}
    requested = [str(net).strip() for net in module.params.get("tagged_networks")]
    resolved = []
    unknown = []
    for net_name in requested:
        if name_to_id.get(net_name):
            resolved.append(name_to_id.get(net_name))
        else:
            unknown.append(net_name)
    if unknown:
        module.fail_json(msg="Networks with names {0} are not applicable or valid.".format(",".join(set(unknown))))
    return resolved
+
+
def validate_native_vlan(module, rest_obj, fabric_id, media_id):
    """Resolve the untagged (native) VLAN name to its VLAN number, failing when invalid."""
    response = rest_obj.invoke_request('POST', APPLICABLE_UNTAGGED.format(fabric_id=fabric_id),
                                       data={"UplinkType": media_id})
    applicable = response.json_data.get('ApplicableUplinkNetworks') or []
    vlan_name = module.params.get("untagged_network")
    vlan_id = 0
    for vlan in applicable:
        if vlan["Name"] == vlan_name:
            # Untagged networks are keyed by the VLAN number, whereas tagged
            # networks use the record 'Id'.
            vlan_id = vlan["VlanMaximum"]
            break
    if not vlan_id:
        module.fail_json(msg="Native VLAN name {0} is not applicable or valid.".format(vlan_name))
    return vlan_id
+
+
def create_uplink(module, rest_obj, fabric_id, uplinks):
    """Create a new uplink on the fabric from the module parameters and exit.

    Fails when mandatory creation parameters are missing, the uplink type is
    unknown, no switch/port details are given, or ports/networks are invalid.
    """
    mparams = module.params
    mandatory_parmas = ["name", "uplink_type", "tagged_networks"]
    for prm in mandatory_parmas:
        if not mparams.get(prm):
            module.fail_json(msg="Mandatory parameter {0} not provided for uplink creation.".format(prm))
    media_id, mtypes = get_item_id(rest_obj, mparams["uplink_type"], MEDIA_TYPES)
    if not media_id:
        module.fail_json(msg="Uplink Type {0} does not exist.".format(mparams["uplink_type"]))
    if mparams.get("primary_switch_service_tag") or mparams.get("secondary_switch_service_tag"):
        if mparams.get("primary_switch_service_tag") == mparams.get("secondary_switch_service_tag"):
            module.fail_json(msg=SAME_SERVICE_TAG_MSG)
        payload_port_list = validate_ioms(module, rest_obj, uplinks)
    else:
        # At least one switch (with ports) is mandatory for creation.
        module.fail_json(msg="Provide port details.")
    tagged_networks = validate_networks(module, rest_obj, fabric_id, media_id)
    create_payload = {
        "Name": mparams["name"],
        "MediaType": mparams["uplink_type"],
        "Ports": [{"Id": port} for port in payload_port_list],
        "Networks": [{"Id": net} for net in tagged_networks]
    }
    if mparams.get("untagged_network"):
        untagged_id = validate_native_vlan(module, rest_obj, fabric_id, media_id)
        create_payload["NativeVLAN"] = untagged_id
    if mparams.get("ufd_enable"):
        create_payload["UfdEnable"] = mparams.get("ufd_enable")
    if mparams.get("description"):
        create_payload["Description"] = mparams.get("description")
    if module.check_mode:
        module.exit_json(changed=True, msg=CHECK_MODE_MSG)
    resp = rest_obj.invoke_request("POST", UPLINKS_URI.format(fabric_id=fabric_id), data=create_payload)
    uplink_id = resp.json_data
    # A dict response carries extra details instead of the new uplink id, so
    # the id is looked up by name and the response returned as additional_info.
    if isinstance(resp.json_data, dict):
        uplink_id, tmp = get_item_id(rest_obj, mparams["name"], UPLINKS_URI.format(fabric_id=fabric_id))
        if not uplink_id:
            uplink_id = ""
        module.exit_json(changed=True, msg="Successfully created the uplink.", uplink_id=uplink_id,
                         additional_info=resp.json_data)
    module.exit_json(changed=True, msg="Successfully created the uplink.", uplink_id=uplink_id)
+
+
def delete_uplink(module, rest_obj, fabric_id, uplink_id):
    """Delete the given uplink from the fabric and exit (check-mode aware)."""
    if module.check_mode:
        module.exit_json(changed=True, msg=CHECK_MODE_MSG)
    rest_obj.invoke_request("DELETE", UPLINK_URI.format(fabric_id=fabric_id, uplink_id=uplink_id))
    module.exit_json(msg="Successfully deleted the uplink.", changed=True)
+
+
def modify_uplink(module, rest_obj, fabric_id, uplink, uplinks):
    """Apply the requested changes to an existing uplink and exit.

    Normalizes the uplink's current state, overlays the requested changes,
    and issues a PUT only when a difference exists; otherwise exits with
    NO_CHANGES_MSG.
    """
    mparams = module.params
    pload_keys = ["Id", "Name", "Description", "MediaType", "NativeVLAN", "UfdEnable", "Ports", "Networks"]
    modify_payload = dict((pload_key, uplink.get(pload_key)) for pload_key in pload_keys)
    # Flatten Ports/Networks to sorted id lists so recursive_diff compares
    # content rather than ordering.
    port_list = list(port["Id"] for port in modify_payload["Ports"])
    modify_payload["Ports"] = sorted(list(set(port_list)))
    network_list = list(network["Id"] for network in modify_payload["Networks"])
    modify_payload["Networks"] = sorted(network_list)
    modify_data = {}
    if mparams.get("new_name"):
        modify_data["Name"] = mparams.get("new_name")
    if mparams.get("description"):
        modify_data["Description"] = mparams.get("description")
    if mparams.get("ufd_enable"):
        modify_data["UfdEnable"] = mparams.get("ufd_enable")
    if mparams.get("uplink_type"):
        # The uplink type is immutable after creation.
        if mparams.get("uplink_type") != uplink.get("MediaType"):
            module.fail_json(msg="Uplink Type cannot be modified.")
        modify_data["MediaType"] = mparams["uplink_type"]
    if mparams.get("primary_switch_service_tag") or mparams.get("secondary_switch_service_tag"):
        if mparams.get("primary_switch_service_tag") == mparams.get("secondary_switch_service_tag"):
            module.fail_json(msg=SAME_SERVICE_TAG_MSG)
        payload_port_list = validate_ioms(module, rest_obj, uplinks)
        modify_data["Ports"] = sorted(list(set(payload_port_list)))
    media_id, mtypes = get_item_id(rest_obj, uplink.get("MediaType"), MEDIA_TYPES)
    if mparams.get("tagged_networks") and media_id:
        tagged_networks = validate_networks(module, rest_obj, fabric_id, media_id)
        modify_data["Networks"] = sorted(tagged_networks)
    if mparams.get("untagged_network") and media_id:
        untagged_id = validate_native_vlan(module, rest_obj, fabric_id, media_id)
        modify_data["NativeVLAN"] = untagged_id
    diff = recursive_diff(modify_data, modify_payload)
    if diff and diff[0]:
        # diff[0] holds the requested values that differ from the current ones.
        modify_payload.update(diff[0])
        if module.check_mode:
            module.exit_json(changed=True, msg=CHECK_MODE_MSG)
        # Re-expand the id lists into the {"Id": ...} shape the API expects.
        modify_payload["Ports"] = list({"Id": port} for port in modify_payload["Ports"])
        modify_payload["Networks"] = list({"Id": net} for net in modify_payload["Networks"])
        resp = rest_obj.invoke_request("PUT", UPLINK_URI.format(fabric_id=fabric_id, uplink_id=uplink['Id']),
                                       data=modify_payload)
        if isinstance(resp.json_data, dict):
            module.exit_json(changed=True, msg="Successfully modified the uplink.", uplink_id=uplink['Id'],
                             additional_info=resp.json_data)
        module.exit_json(changed=True, msg="Successfully modified the uplink.", uplink_id=uplink['Id'])
    module.exit_json(msg=NO_CHANGES_MSG)
+
+
def main():
    """Module entry point: resolve the fabric and uplink, then dispatch the action."""
    specs = {
        "state": {"choices": ['present', 'absent'], "default": "present"},
        "fabric_name": {"required": True, "type": "str"},
        "name": {"required": True, "type": "str"},
        "new_name": {"type": "str"},
        "description": {"type": "str"},
        "uplink_type": {
            "choices": ['Ethernet', 'FCoE', 'FC Gateway', 'FC Direct Attach', 'Ethernet - No Spanning Tree']},
        "ufd_enable": {"choices": ['Enabled', 'Disabled']},
        "primary_switch_service_tag": {"type": "str"},
        "primary_switch_ports": {"type": "list", "elements": "str"},
        "secondary_switch_service_tag": {"type": "str"},
        "secondary_switch_ports": {"type": "list", "elements": "str"},
        "tagged_networks": {"type": "list", "elements": "str"},
        "untagged_network": {"type": "str"}
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        # state=present requires at least one of the create/modify options.
        required_if=[['state', 'present',
                      ('new_name', 'description', 'uplink_type', 'ufd_enable',
                       'primary_switch_service_tag', 'primary_switch_ports', 'secondary_switch_service_tag',
                       'secondary_switch_ports', 'tagged_networks', 'untagged_network',), True]],
        required_together=[["primary_switch_service_tag", "primary_switch_ports"],
                           ["secondary_switch_service_tag", "secondary_switch_ports"]],
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            fabric_id, fabrics = get_item_id(rest_obj, module.params["fabric_name"], FABRIC_URI)
            if not fabric_id:
                module.fail_json(msg="Fabric with name {0} does not exist.".format(module.params["fabric_name"]))
            # $expand inlines Ports and Networks so modify_uplink can diff them.
            uplink, uplinks = get_item_and_list(rest_obj, module.params["name"],
                                                UPLINKS_URI.format(fabric_id=fabric_id) + '?$expand=Ports,Networks')
            if module.params["state"] == "present":
                if uplink:
                    # Exclude the target uplink so its own ports are not
                    # counted as occupied during port validation.
                    uplinks.remove(uplink)
                    modify_uplink(module, rest_obj, fabric_id, uplink, uplinks)
                create_uplink(module, rest_obj, fabric_id, uplinks)
            else:
                if uplink:
                    delete_uplink(module, rest_obj, fabric_id, uplink['Id'])
                # Absent + missing uplink: idempotent no-op.
                if module.check_mode:
                    module.exit_json(msg=NO_CHANGES_MSG)
                module.exit_json(msg="Uplink {0} does not exist.".format(module.params["name"]))
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Connectivity problems are reported as unreachable rather than failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, TypeError, ConnectionError, SSLValidationError, SSLError, OSError) as err:
        module.fail_json(msg=str(err))


if __name__ == "__main__":
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py
new file mode 100644
index 000000000..8c5fa98b3
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py
@@ -0,0 +1,993 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.2.0
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_template
+short_description: Create, modify, deploy, delete, export, import and clone a template on OpenManage Enterprise
+version_added: "2.0.0"
+description: "This module creates, modifies, deploys, deletes, exports, imports and clones a template on
+OpenManage Enterprise."
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ command:
+ description:
+ - C(create) creates a new template.
+ - C(modify) modifies an existing template.
+ - C(deploy) creates a template-deployment job.
+ - C(delete) deletes an existing template.
+ - C(export) exports an existing template.
+ - C(import) creates a template from a specified configuration text in SCP XML format.
+ - C(clone) creates a clone of an existing template.
+ choices: [create, modify, deploy, delete, export, import, clone]
+ default: create
+ aliases: ['state']
+ type: str
+ template_id:
+ description:
+ - ID of the existing template.
+ - This option is applicable when I(command) is C(modify), C(deploy), C(delete) and C(export).
+ - This option is mutually exclusive with I(template_name).
+ type: int
+ template_name:
+ description:
+ - Name of the existing template.
+ - This option is applicable when I(command) is C(modify), C(deploy), C(delete) and C(export).
+ - This option is mutually exclusive with I(template_id).
+ type: str
+ device_id:
+ description:
+ - >-
+ Specify the list of targeted device ID(s) when I(command) is C(deploy). When I(command) is C(create),
+ specify the ID of a single device.
+ - Either I(device_id) or I(device_service_tag) is mandatory or both can be applicable.
+ type: list
+ elements: int
+ default: []
+ device_service_tag:
+ description:
+ - >-
+ Specify the list of targeted device service tags when I (command) is C(deploy). When I(command) is C(create),
+ specify the service tag of a single device.
+ - Either I(device_id) or I(device_service_tag) is mandatory or both can be applicable.
+ type: list
+ elements: str
+ default: []
+ device_group_names:
+ description:
+ - Specify the list of groups when I (command) is C(deploy).
+ - Provide at least one of the mandatory options I(device_id), I(device_service_tag), or I(device_group_names).
+ type: list
+ elements: str
+ default: []
+ template_view_type:
+ description:
+ - Select the type of view of the OME template.
+ - This is applicable when I(command) is C(create),C(clone) and C(import).
+ choices: [Deployment, Compliance, Inventory, Sample, None]
+ type: str
+ default: Deployment
+ attributes:
+ type: dict
+ description:
+ - >-
+ Payload data for the template operations. All the variables in this option are added as payload for C(create),
+ C(modify), C(deploy), C(import), and C(clone) operations. It takes the following attributes.
+ - >-
+ Attributes: List of dictionaries of attributes (if any) to be modified in the deployment template. This is
+ applicable when I(command) is C(deploy) and C(modify). Use the I(Id) If the attribute Id is available.
+ If not, use the comma separated I (DisplayName). For more details about using the I(DisplayName),
+ see the example provided.
+ - >-
+ Name: Name of the template. This is mandatory when I(command) is C(create), C(import), C(clone), and
+ optional when I(command) is C(modify).
+ - >-
+ Description: Description for the template. This is applicable when I(command) is C(create) or C(modify).
+ - >-
+ Fqdds: This allows to create a template using components from a specified reference server. One or more, of the
+ following values must be specified in a comma-separated string: iDRAC, System, BIOS, NIC, LifeCycleController,
+ RAID, and EventFilters. If none of the values are specified, the default value 'All' is selected.
+ This is applicable when I (command) is C(create).
+ - >-
+ Options: Options to control device shutdown or end power state post template deployment. This is applicable
+ for C(deploy) operation.
+ - >-
+ Schedule: Provides options to schedule the deployment task immediately, or at a specified time. This is
+ applicable when I(command) is C(deploy).
+ - >-
+ NetworkBootIsoModel: Payload to specify the ISO deployment details. This is applicable when I(command) is
+ C(deploy).
+ - >-
+ Content: The XML content of template. This is applicable when I(command) is C(import).
+ - >-
+ Type: Template type ID, indicating the type of device for which configuration is supported, such as chassis
+ and servers. This is applicable when I(command) is C(import).
+ - >-
+ TypeId: Template type ID, indicating the type of device for which configuration is supported, such as chassis
+ and servers. This is applicable when I(command) is C(create).
+ - >-
+ Refer OpenManage Enterprise API Reference Guide for more details.
+requirements:
+ - "python >= 3.8.6"
+author: "Jagadeesh N V (@jagadeeshnv)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Create a template from a reference device
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 25123
+ attributes:
+ Name: "New Template"
+ Description: "New Template description"
+
+- name: Modify template name, description, and attribute value
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "modify"
+ template_id: 12
+ attributes:
+ Name: "New Custom Template"
+ Description: "Custom Template Description"
+ # Attributes to be modified in the template.
+ # For information on any attribute id, use API /TemplateService/Templates(Id)/Views(Id)/AttributeViewDetails
+ # This section is optional
+ Attributes:
+ - Id: 1234
+ Value: "Test Attribute"
+ IsIgnored: false
+
+- name: Modify template name, description, and attribute using detailed view
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "modify"
+ template_id: 12
+ attributes:
+ Name: "New Custom Template"
+ Description: "Custom Template Description"
+ Attributes:
+ # Enter the comma separated string as appearing in the Detailed view on GUI
+ # NIC -> NIC.Integrated.1-1-1 -> NIC Configuration -> Wake On LAN1
+ - DisplayName: 'NIC, NIC.Integrated.1-1-1, NIC Configuration, Wake On LAN'
+ Value: Enabled
+ IsIgnored: false
+ # System -> LCD Configuration -> LCD 1 User Defined String for LCD
+ - DisplayName: 'System, LCD Configuration, LCD 1 User Defined String for LCD'
+ Value: LCD str by OMAM
+ IsIgnored: false
+
+- name: Deploy template on multiple devices
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "deploy"
+ template_id: 12
+ device_id:
+ - 12765
+ - 10173
+ device_service_tag:
+ - 'SVTG123'
+ - 'SVTG456'
+
+- name: Deploy template on groups
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "deploy"
+ template_id: 12
+ device_group_names:
+ - server_group_1
+ - server_group_2
+
+- name: Deploy template on multiple devices along with the attributes values to be modified on the target devices
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "deploy"
+ template_id: 12
+ device_id:
+ - 12765
+ - 10173
+ device_service_tag:
+ - 'SVTG123'
+ attributes:
+ # Device specific attributes to be modified during deployment.
+ # For information on any attribute id, use API /TemplateService/Templates(Id)/Views(Id)/AttributeViewDetails
+ # This section is optional
+ Attributes:
+ # specific device where attribute to be modified at deployment run-time.
+ # The DeviceId should be mentioned above in the 'device_id' section.
+ # Service tags not allowed.
+ - DeviceId: 12765
+ Attributes:
+ - Id : 15645
+ Value : "0.0.0.0"
+ IsIgnored : false
+ - DeviceId: 10173
+ Attributes:
+ - Id : 18968
+ Value : "hostname-1"
+ IsIgnored : false
+
+- name: Deploy template and Operating System (OS) on multiple devices
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "deploy"
+ template_id: 12
+ device_id:
+ - 12765
+ device_service_tag:
+ - 'SVTG123'
+ attributes:
+ # Include this to install OS on the devices.
+ # This section is optional
+ NetworkBootIsoModel:
+ BootToNetwork: true
+ ShareType: "NFS"
+ IsoTimeout: 1 # allowable values(1,2,4,8,16) in hours
+ IsoPath: "/home/iso_path/filename.iso"
+ ShareDetail:
+ IpAddress: "192.168.0.2"
+ ShareName: "sharename"
+ User: "share_user"
+ Password: "share_password"
+ Options:
+ EndHostPowerState: 1
+ ShutdownType: 0
+ TimeToWaitBeforeShutdown: 300
+ Schedule:
+ RunLater: true
+ RunNow: false
+
+- name: "Deploy template on multiple devices and changes the device-level attributes. After the template is deployed,
+install OS using its image"
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "deploy"
+ template_id: 12
+ device_id:
+ - 12765
+ - 10173
+ device_service_tag:
+ - 'SVTG123'
+ - 'SVTG456'
+ attributes:
+ Attributes:
+ - DeviceId: 12765
+ Attributes:
+ - Id : 15645
+ Value : "0.0.0.0"
+ IsIgnored : false
+ - DeviceId: 10173
+ Attributes:
+ - Id : 18968
+ Value : "hostname-1"
+ IsIgnored : false
+ NetworkBootIsoModel:
+ BootToNetwork: true
+ ShareType: "NFS"
+ IsoTimeout: 1 # allowable values(1,2,4,8,16) in hours
+ IsoPath: "/home/iso_path/filename.iso"
+ ShareDetail:
+ IpAddress: "192.168.0.2"
+ ShareName: "sharename"
+ User: "share_user"
+ Password: "share_password"
+ Options:
+ EndHostPowerState: 1
+ ShutdownType: 0
+ TimeToWaitBeforeShutdown: 300
+ Schedule:
+ RunLater: true
+ RunNow: false
+
+- name: Delete template
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "delete"
+ template_id: 12
+
+- name: Export a template
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "export"
+ template_id: 12
+
+# Start of example to export template to a local xml file
+- name: Export template to a local xml file
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "export"
+ template_name: "my_template"
+ register: result
+- name: Save template into a file
+ ansible.builtin.copy:
+ content: "{{ result.Content}}"
+ dest: "/path/to/exported_template.xml"
+# End of example to export template to a local xml file
+
+- name: Clone a template
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "clone"
+ template_id: 12
+ attributes:
+ Name: "New Cloned Template Name"
+
+- name: Import template from XML content
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "import"
+ attributes:
+ Name: "Imported Template Name"
+ # Template Type from TemplateService/TemplateTypes
+ Type: 2
+ # xml string content
+ Content: "<SystemConfiguration Model=\"PowerEdge R940\" ServiceTag=\"SVCTAG1\"
+ TimeStamp=\"Tue Sep 24 09:20:57.872551 2019\">\n<Component FQDD=\"AHCI.Slot.6-1\">\n<Attribute
+ Name=\"RAIDresetConfig\">True</Attribute>\n<Attribute Name=\"RAIDforeignConfig\">Clear</Attribute>\n
+ </Component>\n<Component FQDD=\"Disk.Direct.0-0:AHCI.Slot.6-1\">\n<Attribute Name=\"RAIDPDState\">Ready
+ </Attribute>\n<Attribute Name=\"RAIDHotSpareStatus\">No</Attribute>\n</Component>\n
+ <Component FQDD=\"Disk.Direct.1-1:AHCI.Slot.6-1\">\n<Attribute Name=\"RAIDPDState\">Ready</Attribute>\n
+ <Attribute Name=\"RAIDHotSpareStatus\">No</Attribute>\n</Component>\n</SystemConfiguration>\n"
+
+- name: Import template from local XML file
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "import"
+ attributes:
+ Name: "Imported Template Name"
+ Type: 2
+ Content: "{{ lookup('ansible.builtin.file', '/path/to/xmlfile') }}"
+
+- name: "Deploy template and Operating System (OS) on multiple devices."
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "deploy"
+ template_id: 12
+ device_id:
+ - 12765
+ device_service_tag:
+ - 'SVTG123'
+ attributes:
+ # Include this to install OS on the devices.
+ # This section is optional
+ NetworkBootIsoModel:
+ BootToNetwork: true
+ ShareType: "CIFS"
+ IsoTimeout: 1 # allowable values(1,2,4,8,16) in hours
+ IsoPath: "/home/iso_path/filename.iso"
+ ShareDetail:
+ IpAddress: "192.168.0.2"
+ ShareName: "sharename"
+ User: "share_user"
+ Password: "share_password"
+ Options:
+ EndHostPowerState: 1
+ ShutdownType: 0
+ TimeToWaitBeforeShutdown: 300
+ Schedule:
+ RunLater: true
+ RunNow: false
+
+- name: Create a compliance template from reference device
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "create"
+ device_service_tag:
+ - "SVTG123"
+ template_view_type: "Compliance"
+ attributes:
+ Name: "Configuration Compliance"
+ Description: "Configuration Compliance Template"
+ Fqdds: "BIOS"
+
+- name: Import a compliance template from XML file
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "import"
+ template_view_type: "Compliance"
+ attributes:
+ Name: "Configuration Compliance"
+ Content: "{{ lookup('ansible.builtin.file', './test.xml') }}"
+ Type: 2
+'''
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of the template operation.
+ returned: always
+ type: str
+ sample: "Successfully created a template with ID 23"
+return_id:
+ description: ID of the template for C(create), C(modify), C(import) and C(clone) or task created in case of C(deploy).
+ returned: success, when I(command) is C(create), C(modify), C(import), C(clone) and C(deploy)
+ type: int
+ sample: 12
+TemplateId:
+ description: ID of the template for C(export).
+ returned: success, when I(command) is C(export)
+ type: int
+ sample: 13
+Content:
+ description: XML content of the exported template. This content can be written to a xml file.
+ returned: success, when I(command) is C(export)
+ type: str
+ sample: "<SystemConfiguration Model=\"PowerEdge R940\" ServiceTag=\"DEFG123\" TimeStamp=\"Tue Sep 24 09:20:57.872551
+ 2019\">\n<Component FQDD=\"AHCI.Slot.6-1\">\n<Attribute Name=\"RAIDresetConfig\">True</Attribute>\n<Attribute
+ Name=\"RAIDforeignConfig\">Clear</Attribute>\n</Component>\n<Component FQDD=\"Disk.Direct.0-0:AHCI.Slot.6-1\">
+ \n<Attribute Name=\"RAIDPDState\">Ready</Attribute>\n<Attribute Name=\"RAIDHotSpareStatus\">No</Attribute>
+ \n</Component>\n<Component FQDD=\"Disk.Direct.1-1:AHCI.Slot.6-1\">\n<Attribute Name=\"RAIDPDState\">Ready
+ </Attribute>\n<Attribute Name=\"RAIDHotSpareStatus\">No</Attribute>\n</Component>\n</SystemConfiguration>"
+devices_assigned:
+ description: Mapping of devices with the templates already deployed on them.
+ returned: I(command) is C(deploy)
+ type: dict
+ sample: {
+ "10362": 28,
+ "10312": 23
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import apply_diff_key
+
+
+TEMPLATES_URI = "TemplateService/Templates"
+TEMPLATE_PATH = "TemplateService/Templates({template_id})"
+TEMPLATE_ACTION = "TemplateService/Actions/TemplateService.{op}"
+TEMPLATE_ATTRIBUTES = "TemplateService/Templates({template_id})/AttributeDetails"
+DEVICE_URI = "DeviceService/Devices"
+GROUP_URI = "GroupService/Groups"
+PROFILE_URI = "ProfileService/Profiles"
+SEPRTR = ','
+NO_CHANGES_MSG = "No changes found to be applied."
+CHANGES_FOUND = "Changes found to be applied."
+TEMPLATE_NAME_EXISTS = "Template with name '{name}' already exists."
+DEPLOY_DEV_ASSIGNED = "The device(s) '{dev}' have been assigned the template(s) '{temp}' " \
+ "respectively. Please unassign the profiles from the devices."
+
+
def get_profiles(rest_obj):
    """Return the appliance's profile list, or an empty list on any failure.

    Deliberately best-effort: any error while fetching or parsing is
    swallowed so callers can treat "no profiles" and "fetch failed" alike.
    """
    try:
        response = rest_obj.invoke_request('GET', PROFILE_URI)
        return response.json_data.get("value")
    except Exception:
        return []
+
+
def get_group_devices_all(rest_obj, uri):
    """Collect every item across all pages of a paginated OME collection."""
    items = []
    link = uri
    while link:
        page = rest_obj.invoke_request('GET', link).json_data
        items.extend(page.get("value", []))
        # @odata.nextLink is absolute ("/api/..."); keep only the part after "/api".
        link = str(page.get('@odata.nextLink', '')).split('/api')[-1]
    return items
+
+
def get_group(rest_obj, module, group_name):
    """Look up a device group by name; fail the module when no exact match exists."""
    response = rest_obj.invoke_request(
        "GET", GROUP_URI, query_param={"$filter": "Name eq '{0}'".format(group_name)})
    # Re-check for an exact, case-sensitive name match among the filtered results.
    for group in response.json_data.get('value'):
        if group['Name'] == group_name:
            return group
    module.fail_json(msg="Group name '{0}' is invalid. Please provide a valid group name.".format(group_name))
+
+
def get_group_details(rest_obj, module):
    """Resolve the configured group names into a flat list of member device IDs."""
    device_ids = []
    for name in module.params.get('device_group_names'):
        group = get_group(rest_obj, module, name)
        members_uri = GROUP_URI + "({0})/Devices".format(group['Id'])
        members = get_group_devices_all(rest_obj, members_uri)
        device_ids.extend(member['Id'] for member in members)
    return device_ids
+
+
def get_device_ids(module, rest_obj):
    """Build the unique list of target device IDs from IDs, service tags and groups.

    Fails the module when any requested device ID or service tag is not
    present in the appliance inventory.
    """
    targets = []
    if module.params.get('device_service_tag') or module.params.get('device_id'):
        report = rest_obj.get_all_report_details(DEVICE_URI)['report_list']
        # Map each inventoried device's service tag to its numeric ID.
        tag_to_id = dict((dev.get('DeviceServiceTag'), dev.get('Id')) for dev in report)
        requested_ids = module.params.get('device_id')
        unknown_ids = set(requested_ids) - set(tag_to_id.values())
        if unknown_ids:
            fail_module(module, msg="Unable to complete the operation because the entered target device"
                                    " id(s) '{0}' are invalid.".format(",".join(list(map(str, set(unknown_ids))))))
        targets.extend(requested_ids)
        requested_tags = module.params.get('device_service_tag')
        unknown_tags = set(requested_tags) - set(tag_to_id.keys())
        if unknown_tags:
            fail_module(module, msg="Unable to complete the operation because the entered target service"
                                    " tag(s) '{0}' are invalid.".format(",".join(set(unknown_tags))))
        # All tags are valid at this point; translate them to IDs.
        targets.extend(tag_to_id.get(tag) for tag in requested_tags)
    if module.params.get('device_group_names'):
        targets.extend(get_group_details(rest_obj, module))
    return list(set(targets))  # de-duplicate across the three sources
+
+
def get_view_id(rest_obj, viewstr):
    """Translate a template view name into its OME view-type ID."""
    resp = rest_obj.invoke_request('GET', "TemplateService/TemplateViewTypes")
    if resp.success and resp.json_data.get('value'):
        for view in resp.json_data.get('value', []):
            if view.get('Description', "") == viewstr:
                return view.get('Id')
    # Fall back to the well-known static mapping when the lookup yields no match.
    static_map = {"Deployment": 2, "Compliance": 1, "Inventory": 3, "Sample": 4, "None": 0}
    return static_map.get(viewstr)
+
+
def get_type_id_valid(rest_obj, typeid):
    """Return True when typeid is one of the template types known to the appliance."""
    resp = rest_obj.invoke_request('GET', "TemplateService/TemplateTypes")
    if resp.success and resp.json_data.get('value'):
        return any(entry.get('Id') == typeid for entry in resp.json_data.get('value', []))
    return False
+
+
def get_template_by_name(template_name, module, rest_obj):
    """Fetch a template by exact name; return an empty dict when it does not exist."""
    query_param = {"$filter": "Name eq '{0}'".format(template_name)}
    response = rest_obj.invoke_request("GET", TEMPLATES_URI, query_param=query_param)
    # Re-check for an exact name match among the filtered results.
    for candidate in response.json_data.get('value'):
        if candidate['Name'] == template_name:
            return candidate
    return {}
+
+
def recurse_subattr_list(subgroup, prefix, attr_detailed, attr_map, adv_list):
    """Walk nested SubAttributeGroups, recording attributes whose full path is requested.

    Mutates attr_detailed (display path -> attribute ID) and attr_map
    (attribute ID -> attribute dict) for every leaf attribute whose
    comma-joined display path appears in adv_list.
    """
    if not isinstance(subgroup, list):
        return
    for group in subgroup:
        path = "{0}{1}{2}".format(prefix, SEPRTR, group.get("DisplayName"))
        children = group.get("SubAttributeGroups")
        if children:
            recurse_subattr_list(children, path, attr_detailed, attr_map, adv_list)
            continue
        for attribute in group.get('Attributes'):
            attribute['prefix'] = path
            # Paths are case sensitive; whitespace is stripped by the caller.
            full_name = "{0}{1}{2}".format(path, SEPRTR, attribute['DisplayName'])
            if full_name in adv_list:
                attr_detailed[full_name] = attribute['AttributeId']
                attr_map[attribute['AttributeId']] = attribute
+
+
def get_subattr_all(attr_dtls, adv_list):
    """Flatten the attribute-group tree into the two lookup maps used for diffing."""
    attr_detailed, attr_map = {}, {}
    for group in attr_dtls:
        recurse_subattr_list(group.get('SubAttributeGroups'), group.get('DisplayName'),
                             attr_detailed, attr_map, adv_list)
    return attr_detailed, attr_map
+
+
def attributes_check(module, rest_obj, inp_attr, template_id):
    """Count how many input attributes differ from the template's current values.

    Resolves attributes identified by comma-separated DisplayName paths to
    their numeric IDs (mutating the entries of inp_attr["Attributes"] in
    place), drops input attributes that do not exist on the template, and
    returns the number of attributes whose Value or IsIgnored differs.
    Any failure during the comparison is reported as one difference so the
    subsequent modify call still proceeds.
    """
    diff = 0
    try:
        resp = rest_obj.invoke_request("GET", TEMPLATE_ATTRIBUTES.format(template_id=template_id))
        attr_dtls = resp.json_data
        disp_adv_list = inp_attr.get("Attributes", {})
        adv_list = []
        for attr in disp_adv_list:
            if attr.get("DisplayName"):
                # Normalize "A, B, C" to "A,B,C": strip whitespace around separators.
                split_k = str(attr.get("DisplayName")).split(SEPRTR)
                trimmed = map(str.strip, split_k)
                adv_list.append(SEPRTR.join(trimmed))
        attr_detailed, attr_map = get_subattr_all(attr_dtls.get('AttributeGroups'), adv_list)
        payload_attr = inp_attr.get("Attributes", [])
        rem_attrs = []
        for attr in payload_attr:
            if attr.get("DisplayName"):
                split_k = str(attr.get("DisplayName")).split(SEPRTR)
                trimmed = map(str.strip, split_k)
                n_k = SEPRTR.join(trimmed)
                # Rename from the original's `id` to avoid shadowing the builtin.
                attr_id = attr_detailed.get(n_k, "")
                attr['Id'] = attr_id
                attr.pop("DisplayName", None)
            else:
                attr_id = attr.get('Id')
            if attr_id:
                existing = attr_map.get(attr_id, {})
                if not existing:
                    # Attribute is unknown to the template; drop it from the payload.
                    rem_attrs.append(attr)
                    continue
                if attr.get('Value') != existing.get("Value") or attr.get('IsIgnored') != existing.get("IsIgnored"):
                    diff = diff + 1
        for rem in rem_attrs:
            payload_attr.remove(rem)
    except Exception:
        # Deliberate best effort: treat any lookup/compare failure as a change.
        diff = 1
    return diff
+
+
def get_create_payload(module, rest_obj, deviceid, view_id):
    """Build the payload for creating a template from a reference device.

    Exits the module unchanged when a template with the requested name
    already exists.
    """
    payload = {"Fqdds": "All", "ViewTypeId": view_id}
    attributes = module.params.get("attributes").copy()
    if isinstance(attributes, dict):
        type_id = attributes.get("Type") if attributes.get("Type") else attributes.get("TypeId")
        if type_id:
            payload["TypeId"] = type_id
        # "Type" is not a valid key in the create payload; drop it if present.
        attributes.pop("Type", None)
        payload.update(attributes)
    if get_template_by_name(attributes.get("Name"), module, rest_obj):
        module.exit_json(msg=TEMPLATE_NAME_EXISTS.format(name=attributes.get("Name")))
    payload["SourceDeviceId"] = int(deviceid)
    return payload
+
+
def get_modify_payload(module, rest_obj, template_dict):
    """Build the modify payload, exiting early when nothing would change."""
    attributes = module.params.get("attributes")
    attributes['Id'] = template_dict.get('Id')
    payload = {"Name": template_dict["Name"]}
    diff = 0
    if attributes.get("Name", template_dict["Name"]) != template_dict["Name"]:
        # A rename must not collide with an existing template.
        if get_template_by_name(attributes.get("Name"), module, rest_obj):
            module.exit_json(msg=TEMPLATE_NAME_EXISTS.format(name=attributes.get("Name")))
        payload["Name"] = attributes.get("Name")
        diff = diff + 1
    payload["Description"] = template_dict["Description"]
    diff = diff + apply_diff_key(attributes, payload, ["Description"])
    # Compare requested attribute values against the template's current ones.
    if attributes.get("Attributes"):
        diff = diff + attributes_check(module, rest_obj, attributes, template_dict.get('Id'))
    if not diff:
        module.exit_json(msg=NO_CHANGES_MSG)
    if isinstance(attributes, dict):
        payload.update(attributes)
    return payload
+
+
def get_deploy_payload(module_params, deviceidlist, template_id):
    """Build the template-deployment payload from user attributes plus targets."""
    payload = {}
    attributes = module_params.get("attributes")
    if isinstance(attributes, dict):
        payload.update(attributes)
    payload["Id"] = template_id
    payload["TargetIds"] = deviceidlist
    return payload
+
+
def get_import_payload(module, rest_obj, view_id):
    """Build the payload for importing a template from SCP XML content.

    Exits the module unchanged when a template with the requested name
    already exists; fails when the supplied Type is not a valid type ID.
    """
    attributes = module.params.get("attributes").copy()
    payload = {"Name": attributes.pop("Name")}
    if get_template_by_name(payload["Name"], module, rest_obj):
        module.exit_json(msg=TEMPLATE_NAME_EXISTS.format(name=payload["Name"]))
    payload["ViewTypeId"] = view_id
    payload["Type"] = 2
    type_id = attributes.get("Type") if attributes.get("Type") else attributes.get("TypeId")
    if type_id:
        if get_type_id_valid(rest_obj, type_id):
            payload["Type"] = type_id  # Type is mandatory for import
        else:
            fail_module(module, msg="Type provided for 'import' operation is invalid")
    payload["Content"] = attributes.pop("Content")
    if isinstance(attributes, dict):
        # "TypeId" is not a valid key in the import payload; drop it if present.
        attributes.pop("TypeId", None)
        payload.update(attributes)
    return payload
+
+
def get_clone_payload(module, rest_obj, template_id, view_id):
    """Build the payload for cloning an existing template.

    Exits the module unchanged when the new template name already exists.
    """
    attributes = module.params.get("attributes").copy()
    payload = {"SourceTemplateId": template_id,
               "NewTemplateName": attributes.pop("Name")}
    if get_template_by_name(payload["NewTemplateName"], module, rest_obj):
        module.exit_json(msg=TEMPLATE_NAME_EXISTS.format(name=payload["NewTemplateName"]))
    payload["ViewTypeId"] = view_id
    if isinstance(attributes, dict):
        payload.update(attributes)
    return payload
+
+
def get_template_by_id(module, rest_obj, template_id):
    """Fetch a template by ID, failing the module when it cannot be retrieved."""
    response = rest_obj.invoke_request("GET", TEMPLATE_PATH.format(template_id=template_id))
    if response.success:
        return response.json_data
    fail_module(module, msg="Unable to complete the operation because the"
                            " requested template is not present.")
+
+
def get_template_details(module, rest_obj):
    """Look up a template by template_id (preferred) or template_name.

    Returns the matching template dict, or an empty dict when nothing
    matches. (Renames the original's local `id`, which shadowed the
    builtin, to `ident`.)
    """
    ident = module.params.get('template_id')
    query_param = {"$filter": "Id eq {0}".format(ident)}
    srch = 'Id'
    if not ident:
        # Fall back to name-based lookup when no ID was supplied.
        ident = module.params.get('template_name')
        query_param = {"$filter": "Name eq '{0}'".format(ident)}
        srch = 'Name'
    template = {}
    resp = rest_obj.invoke_request('GET', TEMPLATES_URI, query_param=query_param)
    if resp.success and resp.json_data.get('value'):
        # Re-check for an exact Id/Name match among the filtered results.
        for candidate in resp.json_data.get('value', []):
            if candidate.get(srch) == ident:
                template = candidate
    return template
+
+
def _get_resource_parameters(module, rest_obj):
    """Translate the requested command into a (URI path, payload, HTTP method) triple.

    Exits the module directly for no-op cases (deleting a missing template,
    nothing left to deploy, devices blocked by assigned profiles) and, in
    check mode, once a change has been established.
    """
    command = module.params.get("command")
    rest_method = 'POST'
    payload = {}
    template = get_template_details(module, rest_obj)
    template_id = template.get('Id')
    # template_name = template.get('Name')
    # Every command except import/create/delete operates on an existing template.
    if command not in ["import", "create", "delete"] and not template:
        fail_module(module, msg="Enter a valid template_name or template_id")
    if command == "create":
        devid_list = get_device_ids(module, rest_obj)
        # A template is captured from exactly one reference device.
        if len(devid_list) != 1:
            fail_module(module, msg="Create template requires only one reference device")
        view_id = get_view_id(rest_obj, module.params['template_view_type'])
        payload = get_create_payload(module, rest_obj, devid_list[0], view_id)
        path = TEMPLATES_URI
    elif command == 'import':
        view_id = get_view_id(rest_obj, module.params['template_view_type'])
        path = TEMPLATE_ACTION.format(op="Import")
        payload = get_import_payload(module, rest_obj, view_id)
    elif command == "delete":
        if not template:
            # Deleting a non-existent template is treated as idempotent.
            module.exit_json(msg=NO_CHANGES_MSG)
        path = TEMPLATE_PATH.format(template_id=template_id)
        rest_method = 'DELETE'
    elif command == "modify":
        path = TEMPLATE_PATH.format(template_id=template_id)
        template_dict = get_template_by_id(module, rest_obj, template_id)
        payload = get_modify_payload(module, rest_obj, template_dict)
        rest_method = 'PUT'
    elif command == "export":
        path = TEMPLATE_ACTION.format(op="Export")
        payload = {'TemplateId': template_id}
    elif command == "deploy":
        devid_list = get_device_ids(module, rest_obj)
        if not devid_list:
            fail_module(module, msg="There are no devices provided for deploy operation")
        # Targets that already carry a profile from another template block the
        # deployment; targets already on this template are silently skipped.
        profile_list = get_profiles(rest_obj)
        dev_temp_map = {}
        for prof in profile_list:
            target = prof["TargetId"]
            if prof["ProfileState"] > 0 and target in devid_list:
                if template_id == prof['TemplateId']:  # already same template deployed
                    devid_list.remove(target)
                else:
                    dev_temp_map[prof["TargetId"]] = prof['TemplateId']
        if dev_temp_map:
            module.exit_json(devices_assigned=dev_temp_map,
                             msg=DEPLOY_DEV_ASSIGNED.format(dev=','.join(map(str, dev_temp_map.keys())),
                                                            temp=','.join(map(str, dev_temp_map.values()))))
        if not devid_list:
            module.exit_json(msg=NO_CHANGES_MSG)
        path = TEMPLATE_ACTION.format(op="Deploy")
        payload = get_deploy_payload(module.params, devid_list, template_id)
    elif command == "clone":
        view_id = get_view_id(rest_obj, module.params['template_view_type'])
        path = TEMPLATE_ACTION.format(op="Clone")
        payload = get_clone_payload(module, rest_obj, template_id, view_id)
    if module.check_mode:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    return path, payload, rest_method
+
+
+def _validate_inputs(module):
+ """validates input parameters"""
+ command = module.params.get("command")
+ if command in ["create", "deploy"]:
+ dev_id = module.params["device_id"]
+ dev_st = module.params["device_service_tag"]
+ if None in dev_id or None in dev_st:
+ fail_module(module, msg="Argument device_id or device_service_tag has null values")
+ attrib_dict = {}
+ if module.params.get("attributes"):
+ attrib_dict = module.params.get("attributes")
+ if command in ["import", "clone", "create"]:
+ if not attrib_dict.get("Name"):
+ fail_module(module, msg="Argument 'Name' required in attributes for {0} operation".format(command))
+ if command == "import":
+ if not attrib_dict.get("Content"):
+ fail_module(module, msg="Argument 'Content' required in attributes for {0} operation".format(command))
+
+
def password_no_log(attributes):
    """Mask the share password inside *attributes* in place.

    Looks for attributes["NetworkBootIsoModel"]["ShareDetail"]["Password"]
    and, when present, overwrites it with Ansible's standard no-log marker
    so the secret never appears in module output or logs.
    """
    if not isinstance(attributes, dict):
        return
    iso_model = attributes.get("NetworkBootIsoModel")
    if not isinstance(iso_model, dict):
        return
    share_detail = iso_model.get("ShareDetail")
    if isinstance(share_detail, dict) and 'Password' in share_detail:
        share_detail['Password'] = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"
+
+
def fail_module(module, **failmsg):
    """Fail the module run, masking any share password in attributes first."""
    password_no_log(module.params.get("attributes"))
    module.fail_json(**failmsg)
+
+
def exit_module(module, response):
    """Exit the module with a command-specific message and shaped output.

    Masks any share password first. For commands that return an id, exposes
    it as ``return_id``; a deploy id of 0 is reported as a failure. Export
    copies the raw response into the result and is never a change.
    """
    password_no_log(module.params.get("attributes"))
    command = module.params.get('command')
    changed = True
    return_id = None
    result = {}
    if command in ("create", "modify", "deploy", "import", "clone"):
        return_id = response.json_data
        result["return_id"] = return_id
        if command == 'deploy' and return_id == 0:
            # A returned job id of 0 means OME did not create the deploy job.
            result["failed"] = True
            command = 'deploy_fail'
            changed = False
    elif command == 'export':
        changed = False
        result = response.json_data
    messages = {
        'create': "Successfully created a template with ID {0}".format(return_id),
        'modify': "Successfully modified the template with ID {0}".format(return_id),
        'deploy': "Successfully created the template-deployment job with ID {0}".format(return_id),
        'deploy_fail': 'Failed to deploy template.',
        'delete': "Deleted successfully",
        'export': "Exported successfully",
        'import': "Imported successfully",
        'clone': "Cloned successfully",
    }
    module.exit_json(msg=messages.get(command), changed=changed, **result)
+
+
def main():
    """Entry point for the ome_template module.

    Builds the argument spec, validates cross-parameter rules, resolves the
    REST path/payload for the requested command, invokes it, and reports the
    outcome through exit_module()/fail_module() (both mask share passwords).
    """
    specs = {
        "command": {"required": False, "default": "create", "aliases": ['state'],
                    "choices": ['create', 'modify', 'deploy', 'delete', 'export', 'import', 'clone']},
        "template_id": {"required": False, "type": 'int'},
        "template_name": {"required": False, "type": 'str'},
        "template_view_type": {"required": False, "default": 'Deployment',
                               "choices": ['Deployment', 'Compliance', 'Inventory', 'Sample', 'None']},
        "device_id": {"required": False, "type": 'list', "default": [], "elements": 'int'},
        "device_service_tag": {"required": False, "type": 'list', "default": [], "elements": 'str'},
        "device_group_names": {"required": False, "type": 'list', "default": [], "elements": 'str'},
        "attributes": {"required": False, "type": 'dict'},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_if=[
            ['command', 'create', ['attributes']],
            ['command', 'modify', ['attributes']],
            ['command', 'import', ['attributes']],
            # Trailing True => any one of the listed parameters satisfies the rule.
            ['command', 'modify', ['template_id', 'template_name'], True],
            ['command', 'delete', ['template_id', 'template_name'], True],
            ['command', 'export', ['template_id', 'template_name'], True],
            ['command', 'clone', ['template_id', 'template_name'], True],
            ['command', 'deploy', ['template_id', 'template_name'], True],
            ['command', 'deploy', ['device_id', 'device_service_tag', 'device_group_names'], True],
        ],
        mutually_exclusive=[["template_id", "template_name"]],
        supports_check_mode=True)

    try:
        _validate_inputs(module)
        with RestOME(module.params, req_session=True) as rest_obj:
            path, payload, rest_method = _get_resource_parameters(module, rest_obj)
            # module.exit_json(payload=payload, path=path)
            resp = rest_obj.invoke_request(rest_method, path, data=payload)
            if resp.success:
                exit_module(module, resp)
    except HTTPError as err:
        fail_module(module, msg=str(err), error_info=json.load(err))
    except URLError as err:
        # An unreachable host is reported as unreachable, not as a failure.
        password_no_log(module.params.get("attributes"))
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, SSLError, SSLValidationError, ConnectionError, TypeError, ValueError, KeyError, OSError) as err:
        fail_module(module, msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py
new file mode 100644
index 000000000..701874f70
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.1.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_template_identity_pool
+short_description: Attach or detach an identity pool to a requested template on OpenManage Enterprise
+version_added: "2.0.0"
+description:
+  - Attach an identity pool to a requested template on OpenManage Enterprise.
+  - Detach an identity pool from a requested template on OpenManage Enterprise.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ template_name:
+ description: Name of the template to which an identity pool is attached or detached.
+ type: str
+ required: true
+ identity_pool_name:
+    description:
+      - Name of the identity pool. To attach an identity pool to a template, provide the name of the identity pool.
+      - This option is not applicable when detaching an identity pool from a template.
+ type: str
+requirements:
+ - "python >= 3.8.6"
+author: "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Attach an identity pool to a template
+ dellemc.openmanage.ome_template_identity_pool:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_name: template_name
+ identity_pool_name: identity_pool_name
+
+- name: Detach an identity pool from a template
+ dellemc.openmanage.ome_template_identity_pool:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_name: template_name
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall identity pool status of the attach or detach operation.
+ returned: always
+ sample: Successfully attached identity pool to template.
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ssl import SSLError
+
+CONFIG_URI = "TemplateService/Actions/TemplateService.UpdateNetworkConfig"
+TEMPLATE_URI = "TemplateService/Templates"
+IDENTITY_URI = "IdentityPoolService/IdentityPools"
+TEMPLATE_ATTRIBUTE_VIEW = "TemplateService/Templates({template_id})/Views(4)/AttributeViewDetails"
+KEY_ATTR_NAME = 'DisplayName'
+CHANGES_FOUND = "Changes found to be applied."
+NO_CHANGES_FOUND = "No changes found to be applied."
+
+
def get_template_vlan_info(rest_obj, template_id):
    """Return the template's NIC bonding technology value, or "".

    Best effort: any error while querying OME or walking the attribute view
    is swallowed and an empty string is returned instead of failing.
    """
    bonding_tech = ""
    try:
        resp = rest_obj.invoke_request('GET', TEMPLATE_ATTRIBUTE_VIEW.format(template_id=template_id))
        if resp.success:
            for group in resp.json_data.get('AttributeGroups', []):
                if group.get(KEY_ATTR_NAME) != "NicBondingTechnology":
                    continue
                for attribute in group.get("Attributes", []):
                    if attribute.get(KEY_ATTR_NAME).lower() == "nic bonding technology":
                        bonding_tech = attribute.get('Value')
    except Exception:
        # Deliberate best-effort: treat any failure as "no bonding technology".
        bonding_tech = ""
    return bonding_tech
+
+
def get_template_id(rest_obj, module):
    """Return the template record whose Name equals the requested template_name.

    Fails the module when no template with that exact name exists.
    """
    template_name = module.params["template_name"]
    response = rest_obj.invoke_request("GET", TEMPLATE_URI,
                                      query_param={"$filter": "Name eq '{0}'".format(template_name)})
    # The $filter match happens server-side; re-check the name locally before
    # trusting the record.
    matches = [record for record in response.json_data.get('value') if record['Name'] == template_name]
    if not matches:
        module.fail_json(msg="Unable to complete the operation because the requested template"
                             " with name '{0}' is not present.".format(template_name))
    return matches[0]
+
+
def get_identity_id(rest_obj, module):
    """Return the Id of the identity pool named by identity_pool_name.

    Fails the module when no identity pool with that exact name exists.
    """
    identity_name = module.params["identity_pool_name"]
    report = rest_obj.get_all_report_details(IDENTITY_URI)
    for record in report["report_list"]:
        if record['Name'] == identity_name:
            return record['Id']
    module.fail_json(msg="Unable to complete the operation because the requested identity"
                         " pool with name '{0}' is not present.".format(identity_name))
+
+
def main():
    """Entry point for the ome_template_identity_pool module.

    Attaches the named identity pool to the template, or detaches any pool
    when identity_pool_name is omitted (IdentityPoolId 0 means detached).
    """
    specs = {
        "template_name": {"required": True, "type": "str"},
        "identity_pool_name": {"required": False, "type": "str"},
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            template = get_template_id(rest_obj, module)
            template_id = template["Id"]
            # Default to detach: IdentityPoolId 0 clears the association.
            identity_id, message = 0, "Successfully detached identity pool from template."
            if module.params["identity_pool_name"] is not None:
                identity_id = get_identity_id(rest_obj, module)
                message = "Successfully attached identity pool to template."
            # NOTE(review): the current bonding technology is re-sent in the
            # payload — presumably required by the UpdateNetworkConfig action;
            # confirm against the OME API reference.
            nic_bonding_tech = get_template_vlan_info(rest_obj, template_id)
            payload = {"TemplateId": template_id, "IdentityPoolId": identity_id, "BondingTechnology": nic_bonding_tech}
            if template["IdentityPoolId"] == identity_id:
                # Idempotency: the requested pool is already the one assigned.
                module.exit_json(changed=False, msg=NO_CHANGES_FOUND)
            if module.check_mode:
                module.exit_json(changed=True, msg=CHANGES_FOUND)
            resp = rest_obj.invoke_request("POST", CONFIG_URI, data=payload)
            if resp.status_code == 200:
                module.exit_json(msg=message, changed=True)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        module.exit_json(msg=str(err), unreachable=True)
    except (ValueError, TypeError, ConnectionError, SSLError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))
    except Exception as err:
        module.fail_json(msg=str(err))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py
new file mode 100644
index 000000000..e233c5ac5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_template_info
+short_description: Retrieves template details from OpenManage Enterprise
+version_added: "2.0.0"
+description:
+ - This module retrieves the list and details of all the templates on OpenManage Enterprise.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ template_id:
+ description: Unique Id of the template.
+ type: int
+ system_query_options:
+ description: Options for pagination of the output.
+ type: dict
+ suboptions:
+ filter:
+ description: Filter records by the supported values.
+ type: str
+requirements:
+ - "python >= 3.8.6"
+author: "Sajna Shetty(@Sajna-Shetty)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+
+'''
+
+EXAMPLES = r'''
+---
+- name: Retrieve basic details of all templates
+ dellemc.openmanage.ome_template_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Retrieve details of a specific template identified by its template ID
+ dellemc.openmanage.ome_template_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_id: 1
+
+- name: Get filtered template info based on name
+ dellemc.openmanage.ome_template_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ system_query_options:
+ filter: "Name eq 'new template'"
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall template facts status.
+ returned: on error
+ sample: "Failed to fetch the template facts"
+template_info:
+ type: dict
+ description: Details of the templates.
+ returned: success
+ sample: {
+ "192.168.0.1": {
+ "CreatedBy": "system",
+ "CreationTime": "1970-01-31 00:00:56.372144",
+ "Description": "Tune workload for Performance Optimized Virtualization",
+ "HasIdentityAttributes": false,
+ "Id": 1,
+ "IdentityPoolId": 0,
+ "IsBuiltIn": true,
+ "IsPersistencePolicyValid": false,
+ "IsStatelessAvailable": false,
+ "LastUpdatedBy": null,
+ "LastUpdatedTime": "1970-01-31 00:00:56.372144",
+ "Name": "iDRAC Enable Performance Profile for Virtualization",
+ "SourceDeviceId": 0,
+ "Status": 0,
+ "TaskId": 0,
+ "TypeId": 2,
+ "ViewTypeId": 4
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+
def _get_query_parameters(module_params):
    """Build OData query parameters from the ``system_query_options`` input.

    Each supplied sub-option is prefixed with ``$`` as OData requires, e.g.
    ``{"filter": "Name eq 'x'"}`` becomes ``{"$filter": "Name eq 'x'"}``.

    :param module_params: the module's parameter dict.
    :return: dict of OData query parameters; empty when nothing was supplied.
    """
    system_query_param = module_params.get("system_query_options")
    if not system_query_param:
        return {}
    # Dict comprehension instead of dict([(k, v) ...]) — same result, idiomatic.
    return {"$" + key: value for key, value in system_query_param.items() if value is not None}
+
+
def main():
    """Entry point for the ome_template_info module.

    Fetches a single template by id, a filtered set via OData query options,
    or all templates, and returns them keyed by the target hostname.
    """
    specs = {
        "template_id": {"type": 'int', "required": False},
        "system_query_options": {"required": False, "type": 'dict',
                                 "options": {"filter": {"type": 'str', "required": False}}
                                 },
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        mutually_exclusive=[['template_id', 'system_query_options']],
        supports_check_mode=True
    )
    template_uri = "TemplateService/Templates"
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            query_param = None
            if module.params.get("template_id") is not None:
                # Fetch a specific template by its id.
                template_id = module.params.get("template_id")
                template_path = "{0}({1})".format(template_uri, template_id)
            elif module.params.get("system_query_options") is not None:
                # Fetch templates filtered by the supplied OData options.
                query_param = _get_query_parameters(module.params)
                template_path = template_uri
            else:
                # Fetch all templates.
                template_path = template_uri
            resp = rest_obj.invoke_request('GET', template_path, query_param=query_param)
            template_facts = resp.json_data
            if resp.status_code == 200:
                module.exit_json(template_info={module.params["hostname"]: template_facts})
            else:
                module.fail_json(msg="Failed to fetch the template facts")
    except HTTPError as err:
        # Keep msg a string and expose the OME error payload separately, for
        # consistency with the other modules in this collection.
        module.fail_json(msg=str(err), error_info=json.load(err))
    except (URLError, SSLValidationError, ConnectionError, TypeError, ValueError, OSError, SSLError) as err:
        module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py
new file mode 100644
index 000000000..987a8b610
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py
@@ -0,0 +1,448 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.3.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_template_network_vlan
+short_description: Set tagged and untagged vlans to native network card supported by a template on OpenManage Enterprise
+version_added: "2.0.0"
+description: "This module allows to set tagged and untagged vlans to native network card supported by a template
+on OpenManage Enterprise."
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ template_name:
+ description:
+ - Name of the template.
+ - It is mutually exclusive with I(template_id).
+ type: str
+ template_id:
+ description:
+ - Id of the template.
+ - It is mutually exclusive with I(template_name).
+ type: int
+ nic_identifier:
+ description: Display name of NIC port in the template for VLAN configuration.
+ required: true
+ type: str
+ propagate_vlan:
+ description:
+ - To deploy the modified VLAN settings immediately without rebooting the server.
+ - This option will be applied only when there are changes to the VLAN configuration.
+ default: true
+ type: bool
+ version_added: 3.4.0
+ untagged_networks:
+ description: List of untagged networks and their corresponding NIC ports.
+ elements: dict
+ type: list
+ suboptions:
+ port:
+ description: NIC port number of the untagged VLAN.
+ required: true
+ type: int
+ untagged_network_id:
+ description:
+ - ID of the untagged VLAN
+ - Enter 0 to clear the untagged VLAN from the port.
+ - This option is mutually exclusive with I(untagged_network_name)
+        - To get the VLAN network ID, use the API U(https://I(hostname)/api/NetworkConfigurationService/Networks).
+ type: int
+ untagged_network_name:
+ description:
+        - Name of the VLAN to be used as the untagged VLAN.
+        - Provide 0 to clear the untagged VLAN from this I(port).
+        - This parameter is mutually exclusive with I(untagged_network_id).
+ type: str
+ tagged_networks:
+ description: List of tagged VLANs and their corresponding NIC ports.
+ type: list
+ elements: dict
+ suboptions:
+ port:
+ description: NIC port number of the tagged VLAN
+ required: true
+ type: int
+ tagged_network_ids:
+ description:
+ - List of IDs of the tagged VLANs
+ - Enter [] to remove the tagged VLAN from a port.
+ - List of I(tagged_network_ids) is combined with list of I(tagged_network_names) when adding tagged VLANs to a port.
+        - To get the VLAN network ID, use the API U(https://I(hostname)/api/NetworkConfigurationService/Networks).
+ type: list
+ elements: int
+ tagged_network_names:
+ description:
+ - List of names of tagged VLANs
+ - Enter [] to remove the tagged VLAN from a port.
+ - List of I(tagged_network_names) is combined with list of I(tagged_network_ids) when adding tagged VLANs to a port.
+ type: list
+ elements: str
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Jagadeesh N V(@jagadeeshnv)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Add tagged or untagged VLANs to a template using VLAN ID and name
+ dellemc.openmanage.ome_template_network_vlan:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_id: 78
+ nic_identifier: NIC Slot 4
+ untagged_networks:
+ - port: 1
+ untagged_network_id: 127656
+ - port: 2
+ untagged_network_name: vlan2
+ tagged_networks:
+ - port: 1
+ tagged_network_ids:
+ - 12767
+ - 12768
+ - port: 4
+ tagged_network_ids:
+ - 12767
+ - 12768
+ tagged_network_names:
+ - vlan3
+ - port: 2
+ tagged_network_names:
+ - vlan4
+ - vlan1
+
+- name: Clear the tagged and untagged VLANs from a template
+ dellemc.openmanage.ome_template_network_vlan:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_id: 78
+ nic_identifier: NIC Slot 4
+ untagged_networks:
+ # For removing the untagged VLANs for the port 1 and 2
+ - port: 1
+ untagged_network_id: 0
+ - port: 2
+ untagged_network_name: 0
+ tagged_networks:
+ # For removing the tagged VLANs for port 1, 4 and 2
+ - port: 1
+ tagged_network_ids: []
+ - port: 4
+ tagged_network_ids: []
+ tagged_network_names: []
+ - port: 2
+ tagged_network_names: []
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall status of the template vlan operation.
+ returned: always
+ sample: "Successfully applied the network settings to template."
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to complete the request because
+ TemplateId does not exist or is not applicable for the
+ resource URI.",
+ "MessageArgs": [
+ "TemplateId"
+ ],
+ "MessageId": "CGEN1004",
+ "RelatedProperties": [],
+ "Resolution": "Check the request resource URI. Refer to
+ the OpenManage Enterprise-Modular User's Guide for more
+ information about resource URI and its properties.",
+ "Severity": "Critical"
+ }
+ ],
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information."
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+
+NETWORK_HIERARCHY_VIEW = 4 # For Network hierarchy View in a Template
+UPDATE_NETWORK_CONFIG = "TemplateService/Actions/TemplateService.UpdateNetworkConfig"
+TEMPLATE_ATTRIBUTE_VIEW = "TemplateService/Templates({0})/Views({1}" \
+ ")/AttributeViewDetails"
+VLAN_NETWORKS = "NetworkConfigurationService/Networks?$top=9999"
+TEMPLATE_VIEW = "TemplateService/Templates" # Add ?$top=9999 if not query
+NO_CHANGES_MSG = "No changes found to be applied."
+CHANGES_FOUND = "Changes found to be applied."
+SUCCESS_MSG = "Successfully applied the network settings to the template."
+KEY_ATTR_NAME = 'DisplayName'
+SUB_GRP_ATTR_NAME = 'SubAttributeGroups'
+GRP_ATTR_NAME = 'Attributes'
+GRP_NAME_ID_ATTR_NAME = 'GroupNameId'
+CUSTOM_ID_ATTR_NAME = 'CustomId'
+
+
def get_template_details(module, rest_obj):
    """Look up a template by template_id (preferred) or template_name.

    :return: the matching template record dict.
    Fails the module when no exact match is found.
    """
    # Renamed from ``id`` to avoid shadowing the Python builtin.
    identifier = module.params.get('template_id')
    search_field = 'Id'
    query_param = {"$filter": "Id eq {0}".format(identifier)}
    if not identifier:
        identifier = module.params.get('template_name')
        query_param = {"$filter": "Name eq '{0}'".format(identifier)}
        search_field = 'Name'
    resp = rest_obj.invoke_request('GET', TEMPLATE_VIEW, query_param=query_param)
    if resp.success and resp.json_data.get('value'):
        # $filter matching is server-side; confirm the exact field value locally.
        for template in resp.json_data.get('value', []):
            if template.get(search_field) == identifier:
                return template
    module.fail_json(msg="Template with {0} '{1}' not found.".format(search_field, identifier))
+
+
def get_vlan_name_id_map(rest_obj):
    """Return a mapping of VLAN network Name -> Id from OME.

    An empty dict is returned when the request fails or no networks exist.
    """
    resp = rest_obj.invoke_request('GET', VLAN_NETWORKS)
    if not (resp.success and resp.json_data.get('value')):
        return {}
    return {network["Name"]: network["Id"] for network in resp.json_data.get('value', [])}
+
+
def get_template_vlan_info(module, rest_obj, template_id):
    """Collect per-port VLAN details for the requested NIC from a template.

    :return: 5-tuple of
        port_id_map       -- port number -> component id,
        port_untagged_map -- port number -> untagged VLAN id,
        port_tagged_map   -- port number -> list of tagged VLAN ids,
        port_nic_bond_map -- port number -> "NIC Bonding Enabled" value,
        nic_bonding_tech  -- template-level NIC bonding technology string.
    Fails the module when the requested nic_identifier is not in the template.
    All maps are returned empty when the attribute-view request fails.
    """
    port_id_map = {}
    port_untagged_map = {}
    port_tagged_map = {}
    port_nic_bond_map = {}
    nic_bonding_tech = ""
    # Fix: initialize so a template whose attribute view has no "NICModel"
    # group reaches the clean fail_json below instead of UnboundLocalError.
    nic_group = []
    resp = rest_obj.invoke_request('GET', TEMPLATE_ATTRIBUTE_VIEW.format(
        template_id, NETWORK_HIERARCHY_VIEW))
    if resp.success:
        nic_id = module.params.get("nic_identifier")
        nic_model = resp.json_data.get('AttributeGroups', [])
        for xnic in nic_model:
            if xnic.get(KEY_ATTR_NAME) == "NICModel":
                nic_group = xnic.get('SubAttributeGroups', [])
            if xnic.get(KEY_ATTR_NAME) == "NicBondingTechnology":
                nic_bonding_list = xnic.get("Attributes", [])
                for xbnd in nic_bonding_list:
                    if xbnd.get(KEY_ATTR_NAME).lower() == "nic bonding technology":
                        nic_bonding_tech = xbnd.get('Value')
        nic_found = False
        for nic in nic_group:
            if nic_id == nic.get(KEY_ATTR_NAME):
                nic_found = True
                for port in nic.get(SUB_GRP_ATTR_NAME):  # ports
                    for partition in port.get(SUB_GRP_ATTR_NAME):  # partitions
                        for attribute in partition.get(GRP_ATTR_NAME):  # attributes
                            # CustomId 0 marks attributes that are not real port components.
                            if attribute.get(CUSTOM_ID_ATTR_NAME) != 0:
                                port_number = port.get(GRP_NAME_ID_ATTR_NAME)
                                port_id_map[port_number] = attribute.get(CUSTOM_ID_ATTR_NAME)
                                if attribute.get(KEY_ATTR_NAME).lower() == "vlan untagged":
                                    port_untagged_map[port_number] = int(attribute['Value'])
                                if attribute.get(KEY_ATTR_NAME).lower() == "vlan tagged":
                                    # Tagged VLANs arrive as a comma-separated string.
                                    port_tagged_map[port_number] = []
                                    if attribute['Value']:
                                        port_tagged_map[port_number] = \
                                            list(map(int, (attribute['Value']).replace(" ", "").split(",")))
                                if attribute.get(KEY_ATTR_NAME).lower() == "nic bonding enabled":
                                    port_nic_bond_map[port_number] = attribute['Value']
        if not nic_found:
            module.fail_json(msg="NIC with name '{0}' not found for template with id {1}".format(nic_id, template_id))
    return port_id_map, port_untagged_map, port_tagged_map, port_nic_bond_map, nic_bonding_tech
+
+
def compare_nested_dict(modify_setting_payload, existing_setting_payload):
    """Return True when every requested setting already equals the existing one.

    Recurses into nested dicts; a requested key that is absent (or None) in
    the existing payload counts as a difference.
    """
    for key, requested in modify_setting_payload.items():
        current = existing_setting_payload.get(key)
        if current is None:
            return False
        if isinstance(requested, dict):
            if not compare_nested_dict(requested, current):
                return False
        elif requested != current:
            return False
    return True
+
+
def get_vlan_payload(module, rest_obj, untag_dict, tagged_dict):
    """Build the TemplateService.UpdateNetworkConfig payload.

    Exits with NO_CHANGES_MSG when the template already matches the request,
    and with CHANGES_FOUND in check mode. Consumes entries from untag_dict /
    tagged_dict; leftovers name ports that do not exist on the selected NIC
    and cause the module to fail.
    """
    template = get_template_details(module, rest_obj)
    port_id_map, port_untagged_map, port_tagged_map, port_nic_bond_map, nic_bonding_tech = \
        get_template_vlan_info(module, rest_obj, template['Id'])
    # Idempotency: nothing to do when both requested maps match current state.
    if compare_nested_dict(untag_dict, port_untagged_map) and compare_nested_dict(tagged_dict, port_tagged_map):
        module.exit_json(msg=NO_CHANGES_MSG)
    vlan_attributes = []
    for port in port_id_map:
        if port not in untag_dict and port not in tagged_dict:
            continue
        vlan_attributes.append({
            "Untagged": untag_dict.pop(port, port_untagged_map.get(port)),
            "Tagged": tagged_dict.pop(port, port_tagged_map.get(port)),
            "ComponentId": port_id_map.get(port),
            "IsNicBonded": port_nic_bond_map.get(port),
        })
    # Anything left over references a port the NIC does not have.
    if untag_dict:
        module.fail_json(msg="Invalid port(s) {0} found for untagged VLAN".format(untag_dict.keys()))
    if tagged_dict:
        module.fail_json(msg="Invalid port(s) {0} found for tagged VLAN".format(tagged_dict.keys()))
    if module.check_mode:
        module.exit_json(changed=True, msg=CHANGES_FOUND)
    return {
        "TemplateId": template["Id"],
        "IdentityPoolId": template["IdentityPoolId"],
        "BondingTechnology": nic_bonding_tech,
        "PropagateVlan": module.params.get('propagate_vlan'),
        "VlanAttributes": vlan_attributes,
    }
+
+
def get_key(val, my_dict):
    """Return the first key in *my_dict* whose value equals *val*, else None."""
    return next((key for key, value in my_dict.items() if value == val), None)
+
+
def validate_vlans(module, rest_obj):
    """Validate the requested VLANs against the OME network inventory.

    :return: tuple (untag_dict, tagged_dict) where
        untag_dict  -- port number -> untagged VLAN id (0 clears the port),
        tagged_dict -- port number -> de-duplicated list of tagged VLAN ids.
    Fails the module on unknown VLAN ids/names, repeated ports, or a VLAN
    listed as both tagged and untagged for the same port.
    """
    vlan_name_id_map = get_vlan_name_id_map(rest_obj)
    # Pseudo-entry so users can pass 0 / "0" to clear an untagged VLAN.
    vlan_name_id_map["0"] = 0
    tagged_list = module.params.get("tagged_networks")
    untag_list = module.params.get("untagged_networks")
    untag_dict = {}
    if untag_list:
        for utg in untag_list:
            p = utg["port"]
            if utg.get("untagged_network_id") is not None:
                if p in untag_dict:
                    module.fail_json(msg="port {0} is repeated for "
                                         "untagged_network_id".format(p))
                vlan = utg.get("untagged_network_id")
                if vlan not in vlan_name_id_map.values():
                    module.fail_json(msg="untagged_network_id: {0} is not a "
                                         "valid vlan id for port {1}".
                                     format(vlan, p))
                untag_dict[p] = vlan
            if utg.get("untagged_network_name"):
                vlan = utg.get("untagged_network_name")
                if vlan in vlan_name_id_map:
                    if p in untag_dict:
                        module.fail_json(msg="port {0} is repeated for "
                                             "untagged_network_name".format(p))
                    untag_dict[p] = vlan_name_id_map.get(vlan)
                else:
                    module.fail_json(msg="{0} is not a valid vlan name for port {1}".format(vlan, p))
    # The clear-marker only applies to untagged VLANs; drop it before
    # validating the tagged lists.
    vlan_name_id_map.pop("0")
    tagged_dict = {}
    if tagged_list:
        for tg in tagged_list:
            p = tg["port"]
            tg_list = []
            empty_list = False
            tgnids = tg.get("tagged_network_ids")
            if isinstance(tgnids, list):
                if len(tgnids) == 0:
                    # An explicit [] means "remove all tagged VLANs from this port".
                    empty_list = True
                for vl in tgnids:
                    if vl not in vlan_name_id_map.values():
                        module.fail_json(msg="{0} is not a valid vlan id "
                                             "port {1}".format(vl, p))
                    tg_list.append(vl)
            tgnames = tg.get("tagged_network_names")
            if isinstance(tgnames, list):
                if len(tgnames) == 0:
                    empty_list = True
                # ids and names are combined into one tagged list for the port.
                for vln in tgnames:
                    if vln not in vlan_name_id_map:
                        module.fail_json(msg="{0} is not a valid vlan name "
                                             "port {1}".format(vln, p))
                    tg_list.append(vlan_name_id_map.get(vln))
            if not tg_list and not empty_list:
                module.fail_json(msg="No tagged_networks provided or valid tagged_networks not found for port {0}"
                                 .format(p))
            tagged_dict[p] = list(set(tg_list))  # Will not report duplicates
    for k, v in untag_dict.items():
        if v in tagged_dict.get(k, []):
            module.fail_json(msg="vlan {0}('{1}') cannot be in both tagged and untagged list for port {2}".
                             format(v, get_key(v, vlan_name_id_map), k))
    return untag_dict, tagged_dict
+
+
def main():
    """Entry point for the ome_template_network_vlan module.

    Validates the requested tagged/untagged VLANs, builds the
    UpdateNetworkConfig payload (which handles idempotency and check mode),
    and posts it to OME.
    """
    port_untagged_spec = {"port": {"required": True, "type": "int"},
                          "untagged_network_id": {"type": "int"},
                          "untagged_network_name": {"type": "str"}}
    port_tagged_spec = {"port": {"required": True, "type": "int"},
                        "tagged_network_ids": {"type": "list", "elements": "int"},
                        "tagged_network_names": {"type": "list", "elements": "str"}}
    specs = {
        "template_name": {"required": False, "type": "str"},
        "template_id": {"required": False, "type": "int"},
        "nic_identifier": {"required": True, "type": "str"},
        "untagged_networks": {"required": False, "type": "list", "elements": "dict", "options": port_untagged_spec,
                              "mutually_exclusive": [("untagged_network_id", "untagged_network_name")]},
        "tagged_networks": {"required": False, "type": "list", "elements": "dict", "options": port_tagged_spec},
        "propagate_vlan": {"type": "bool", "default": True}
    }
    specs.update(ome_auth_params)
    module = AnsibleModule(
        argument_spec=specs,
        required_one_of=[("template_id", "template_name"),
                         ("untagged_networks", "tagged_networks")],
        mutually_exclusive=[("template_id", "template_name")],
        supports_check_mode=True
    )
    try:
        with RestOME(module.params, req_session=True) as rest_obj:
            untag_dict, tagged_dict = validate_vlans(module, rest_obj)
            payload = get_vlan_payload(module, rest_obj, untag_dict, tagged_dict)
            resp = rest_obj.invoke_request("POST", UPDATE_NETWORK_CONFIG, data=payload)
            if resp.success:
                module.exit_json(msg=SUCCESS_MSG, changed=True)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # An unreachable host is reported as unreachable, not as a failure.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, SSLValidationError, OSError) as err:
        module.fail_json(msg=str(err))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py
new file mode 100644
index 000000000..c768b4ca5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py
@@ -0,0 +1,264 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_user
+short_description: Create, modify or delete a user on OpenManage Enterprise
+version_added: "2.0.0"
+description: This module creates, modifies or deletes a user on OpenManage Enterprise.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ state:
+ type: str
+ description:
+ - C(present) creates a user in case the I(UserName) provided inside I(attributes) does not exist.
+ - C(present) modifies a user in case the I(UserName) provided inside I(attributes) exists.
+ - C(absent) deletes an existing user.
+ choices: [present, absent]
+ default: present
+ user_id:
+ description:
+ - Unique ID of the user to be deleted.
+ - Either I(user_id) or I(name) is mandatory for C(absent) operation.
+ type: int
+ name:
+ type: str
+ description:
+ - Unique Name of the user to be deleted.
+ - Either I(user_id) or I(name) is mandatory for C(absent) operation.
+ attributes:
+ type: dict
+ default: {}
+ description:
+ - >-
+ Payload data for the user operations. It can take the following attributes for C(present).
+ - >-
+ UserTypeId, DirectoryServiceId, Description, Name, Password, UserName, RoleId, Locked, Enabled.
+ - >-
+ OME will throw an error if a required parameter is not provided for an operation.
+ - >-
+ Refer OpenManage Enterprise API Reference Guide for more details.
+requirements:
+ - "python >= 3.8.6"
+author: "Sajna Shetty(@Sajna-Shetty)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module does not support C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Create user with required parameters
+ dellemc.openmanage.ome_user:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ attributes:
+ UserName: "user1"
+ Password: "UserPassword"
+ RoleId: "10"
+ Enabled: True
+
+- name: Create user with all parameters
+ dellemc.openmanage.ome_user:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ attributes:
+ UserName: "user2"
+ Description: "user2 description"
+ Password: "UserPassword"
+ RoleId: "10"
+ Enabled: True
+ DirectoryServiceId: 0
+ UserTypeId: 1
+ Locked: False
+ Name: "user2"
+
+- name: Modify existing user
+ dellemc.openmanage.ome_user:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "present"
+ attributes:
+ UserName: "user3"
+ RoleId: "10"
+ Enabled: True
+ Description: "Modify user Description"
+
+- name: Delete existing user using id
+ dellemc.openmanage.ome_user:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "absent"
+ user_id: 1234
+
+- name: Delete existing user using name
+ dellemc.openmanage.ome_user:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "absent"
+ name: "name"
+'''
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of the user operation.
+ returned: always
+ type: str
+ sample: "Successfully created a User"
+user_status:
+ description: Details of the user operation, when I(state) is C(present).
+ returned: When I(state) is C(present).
+ type: dict
+ sample:
+ {
+ "Description": "Test user creation",
+ "DirectoryServiceId": 0,
+ "Enabled": true,
+ "Id": "61546",
+ "IsBuiltin": false,
+ "Locked": false,
+ "Name": "test",
+ "Password": null,
+ "PlainTextPassword": null,
+ "RoleId": "10",
+ "UserName": "test",
+ "UserTypeId": 1
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+
+def _validate_inputs(module):
+ """both user_id and name are not acceptable in case of state is absent"""
+ state = module.params['state']
+ user_id = module.params.get('user_id')
+ name = module.params.get('name')
+ if state != 'present' and (user_id is None and name is None):
+ fail_module(module, msg="One of the following 'user_id' or 'name' "
+ "option is required for state 'absent'")
+
+
+def get_user_id_from_name(rest_obj, name):
+ """Get the account id using account name"""
+ user_id = None
+ if name is not None:
+ resp = rest_obj.invoke_request('GET', 'AccountService/Accounts')
+ if resp.success:
+ for user in resp.json_data.get('value'):
+ if 'UserName' in user and user['UserName'] == name:
+ return user['Id']
+ return user_id
+
+
+def _get_resource_parameters(module, rest_obj):
+ state = module.params["state"]
+ payload = module.params.get("attributes")
+ if state == "present":
+ name = payload.get('UserName')
+ user_id = get_user_id_from_name(rest_obj, name)
+ if user_id is not None:
+ payload.update({"Id": user_id})
+ path = "AccountService/Accounts('{user_id}')".format(user_id=user_id)
+ method = 'PUT'
+ else:
+ path = "AccountService/Accounts"
+ method = 'POST'
+ else:
+ user_id = module.params.get("user_id")
+ if user_id is None:
+ name = module.params.get('name')
+ user_id = get_user_id_from_name(rest_obj, name)
+ if user_id is None:
+ fail_module(module, msg="Unable to get the account because the specified account "
+ "does not exist in the system.")
+ path = "AccountService/Accounts('{user_id}')".format(user_id=user_id)
+ method = 'DELETE'
+ return method, path, payload
+
+
+def password_no_log(attributes):
+ if isinstance(attributes, dict) and 'Password' in attributes:
+ attributes['Password'] = "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER"
+
+
+def fail_module(module, **failmsg):
+ password_no_log(module.params.get("attributes"))
+ module.fail_json(**failmsg)
+
+
+def exit_module(module, response, http_method):
+ password_no_log(module.params.get("attributes"))
+ msg_dict = {'POST': "Successfully created a User",
+ 'PUT': "Successfully modified a User",
+ 'DELETE': "Successfully deleted the User"}
+ state_msg = msg_dict[http_method]
+ if response.status_code != 204:
+ module.exit_json(msg=state_msg, changed=True, user_status=response.json_data)
+ else:
+ # For delete operation no response content is returned
+ module.exit_json(msg=state_msg, changed=True)
+
+
+def main():
+ specs = {
+ "state": {"required": False, "type": 'str', "default": "present",
+ "choices": ['present', 'absent']},
+ "user_id": {"required": False, "type": 'int'},
+ "name": {"required": False, "type": 'str'},
+ "attributes": {"required": False, "type": 'dict'},
+ }
+ specs.update(ome_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ mutually_exclusive=[['user_id', 'name'], ],
+ required_if=[['state', 'present', ['attributes']], ],
+ supports_check_mode=False)
+
+ try:
+ _validate_inputs(module)
+ if module.params.get("attributes") is None:
+ module.params["attributes"] = {}
+ with RestOME(module.params, req_session=True) as rest_obj:
+ method, path, payload = _get_resource_parameters(module, rest_obj)
+ resp = rest_obj.invoke_request(method, path, data=payload)
+ if resp.success:
+ exit_module(module, resp, method)
+ except HTTPError as err:
+ fail_module(module, msg=str(err), user_status=json.load(err))
+ except (URLError, SSLValidationError, ConnectionError, TypeError, ValueError, OSError, SSLError) as err:
+ fail_module(module, msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py
new file mode 100644
index 000000000..b42f180fe
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_user_info
+short_description: Retrieves details of all accounts or a specific account on OpenManage Enterprise
+version_added: "2.0.0"
+description:
+ - "This module retrieves the list and basic details of all accounts or details of a specific account on
+ OpenManage Enterprise."
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ account_id:
+ description: Unique Id of the account.
+ type: int
+ system_query_options:
+ description: Options for filtering the output.
+ type: dict
+ suboptions:
+ filter:
+ description: Filter records for the supported values.
+ type: str
+requirements:
+ - "python >= 3.8.6"
+author: "Jagadeesh N V(@jagadeeshnv)"
+notes:
+ - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Retrieve basic details of all accounts
+ dellemc.openmanage.ome_user_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Retrieve details of a specific account identified by its account ID
+ dellemc.openmanage.ome_user_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ account_id: 1
+
+- name: Get filtered user info based on user name
+ dellemc.openmanage.ome_user_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ system_query_options:
+ filter: "UserName eq 'test'"
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Overall status of fetching user facts.
+ returned: on error
+ sample: "Unable to retrieve the account details."
+user_info:
+ type: dict
+ description: Details of the user.
+ returned: success
+ sample: {
+ "192.168.0.1": {
+ "Id": "1814",
+ "UserTypeId": 1,
+ "DirectoryServiceId": 0,
+ "Description": "user name description",
+ "Name": "user_name",
+ "Password": null,
+ "UserName": "user_name",
+ "RoleId": "10",
+ "Locked": false,
+ "IsBuiltin": true,
+ "Enabled": true
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+
+def _get_query_parameters(module_params):
+ """Builds query parameter.
+
+ :return: dict
+ :example: {"$filter": UserName eq 'user name'}
+ """
+ system_query_param = module_params.get("system_query_options")
+ query_param = {}
+ if system_query_param:
+ query_param = dict([("$" + k, v) for k, v in system_query_param.items() if v is not None])
+ return query_param
+
+
+def main():
+ specs = {
+ "account_id": {"type": 'int', "required": False},
+ "system_query_options": {"required": False, "type": 'dict', "options": {
+ "filter": {"type": 'str', "required": False},
+ }},
+ }
+ specs.update(ome_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ mutually_exclusive=[
+ ('account_id', 'system_query_options')
+ ],
+ supports_check_mode=True
+ )
+ account_uri = "AccountService/Accounts"
+ query_param = None
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ if module.params.get("account_id") is not None:
+ # Fetch specific account
+ account_id = module.params.get("account_id")
+ account_path = "{0}('{1}')".format(account_uri, account_id)
+ elif module.params.get("system_query_options") is not None:
+ # Fetch all the user based on UserName
+ query_param = _get_query_parameters(module.params)
+ account_path = account_uri
+ else:
+ # Fetch all users
+ account_path = account_uri
+ resp = rest_obj.invoke_request('GET', account_path, query_param=query_param)
+ user_facts = resp.json_data
+ user_exists = True
+ if "value" in user_facts and len(user_facts["value"]) == 0:
+ user_exists = False
+ # check for 200 status as GET only returns this for success
+ if resp.status_code == 200 and user_exists:
+ module.exit_json(user_info={module.params["hostname"]: user_facts})
+ else:
+ module.fail_json(msg="Unable to retrieve the account details.")
+ except HTTPError as err:
+ module.fail_json(msg=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError, SSLError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py
new file mode 100644
index 000000000..c0a0fc475
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py
@@ -0,0 +1,335 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.1.0
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+
+# see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: redfish_event_subscription
+short_description: Manage Redfish Subscriptions
+version_added: "4.1.0"
+description:
+ This module allows you to add or delete Redfish Event subscriptions.
+extends_documentation_fragment:
+ - dellemc.openmanage.redfish_auth_options
+options:
+ destination:
+ description:
+ - The HTTPS URI of the destination to send events.
+ - HTTPS is required.
+ type: str
+ required: True
+ event_type:
+ description:
+ - Specifies the event type to be subscribed.
+ - C(Alert) used to subscribe for alert.
+ - C(MetricReport) used to subscribe for the metrics report.
+ type: str
+ default: Alert
+ choices: [Alert, MetricReport]
+ event_format_type:
+ description:
+ - Specifies the format type of the event to be subscribed.
+ - C(Event) used to subscribe for Event format type.
+ - C(MetricReport) used to subscribe for the metrics report format type.
+ type: str
+ default: Event
+ choices: [Event, MetricReport]
+ state:
+ description:
+ - C(present) adds new event subscription.
+ - C(absent) deletes event subscription with the specified I(destination).
+ type: str
+ default: present
+ choices: ["present", "absent"]
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Trevor Squillario (@TrevorSquillario)"
+ - "Sachin Apagundi (@sachin-apa)"
+notes:
+ - I(event_type) needs to be C(MetricReport) and I(event_format_type) needs to be C(MetricReport) for metrics
+ subscription.
+ - I(event_type) needs to be C(Alert) and I(event_format_type) needs to be C(Event) for event subscription.
+ - Modifying a subscription is not supported.
+ - Context is always set to RedfishEvent.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Add Redfish metric subscription
+ redfish_event_subscription:
+ baseuri: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ destination: "https://192.168.1.100:8188"
+ event_type: MetricReport
+ event_format_type: MetricReport
+ state: present
+
+- name: Add Redfish alert subscription
+ redfish_event_subscription:
+ baseuri: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ destination: "https://server01.example.com:8188"
+ event_type: Alert
+ event_format_type: Event
+ state: present
+
+- name: Delete Redfish subscription with a specified destination
+ redfish_event_subscription:
+ baseuri: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ destination: "https://server01.example.com:8188"
+ state: absent
+"""
+
+RETURN = """
+---
+msg:
+ description: Overall status of the task.
+ returned: always
+ type: str
+ sample: Successfully added the subscription.
+status:
+ description: Returns the created subscription object.
+ returned: on adding subscription successfully
+ type: dict
+ sample: {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "The resource has been created successfully",
+ "MessageArgs": [],
+ "MessageArgs@odata.count": 0,
+ "MessageId": "Base.1.7.Created",
+ "RelatedProperties": [],
+ "RelatedProperties@odata.count": 0,
+ "Resolution": "None",
+ "Severity": "OK"
+ },
+ {
+ "Message": "A new resource is successfully created.",
+ "MessageArgs": [],
+ "MessageArgs@odata.count": 0,
+ "MessageId": "IDRAC.2.2.SYS414",
+ "RelatedProperties": [],
+ "RelatedProperties@odata.count": 0,
+ "Resolution": "No response action is required.",
+ "Severity": "Informational"
+ }
+ ],
+ "Actions": {
+ "#EventDestination.ResumeSubscription": {
+ "target": "/redfish/v1/EventService/Subscriptions/5d432f36-81f4-11eb-9dc0-2cea7ff7ff9a/Actions/EventDestination.ResumeSubscription"
+ }
+ },
+ "Context": "RedfishEvent",
+ "DeliveryRetryPolicy": "RetryForever",
+ "Description": "Event Subscription Details",
+ "Destination": "https://192.168.1.100:8188",
+ "EventFormatType": "Event",
+ "EventTypes": [
+ "Alert"
+ ],
+ "EventTypes@odata.count": 1,
+ "HttpHeaders": [],
+ "HttpHeaders@odata.count": 0,
+ "Id": "5d432f36-81f4-11eb-9dc0-2cea7ff7ff9a",
+ "MetricReportDefinitions": [],
+ "MetricReportDefinitions@odata.count": 0,
+ "Name": "EventSubscription 5d432f36-81f4-11eb-9dc0-2cea7ff7ff9a",
+ "OriginResources": [],
+ "OriginResources@odata.count": 0,
+ "Protocol": "Redfish",
+ "Status": {
+ "Health": "OK",
+ "HealthRollup": "OK",
+ "State": "Enabled"
+ },
+ "SubscriptionType": "RedfishEvent"
+ }
+error_info:
+ type: dict
+ description: Details of http error.
+ returned: on http error
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to complete the operation because the JSON data format entered is invalid.",
+ "Resolution": "Do the following and the retry the operation:
+ 1) Enter the correct JSON data format and retry the operation.
+ 2) Make sure that no syntax error is present in JSON data format.
+ 3) Make sure that a duplicate key is not present in JSON data format.",
+ "Severity": "Critical"
+ },
+ {
+ "Message": "The request body submitted was malformed JSON and
+ could not be parsed by the receiving service.",
+ "Resolution": "Ensure that the request body is valid JSON and resubmit the request.",
+ "Severity": "Critical"
+ }
+ ],
+ "code": "Base.1.2.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information."
+ }
+ }
+"""
+
+import json
+import os
+from ssl import SSLError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+
+DESTINATION_INVALID = "The Parameter destination must have an HTTPS destination. The HTTP destination is not allowed"
+SUBSCRIPTION_EXISTS = "No changes found to be applied."
+SUBSCRIPTION_DELETED = "Successfully deleted the subscription."
+SUBSCRIPTION_UNABLE_DEL = "Unable to delete the subscription."
+SUBSCRIPTION_UNABLE_ADD = "Unable to add a subscription."
+SUBSCRIPTION_ADDED = "Successfully added the subscription."
+DESTINATION_MISMATCH = "No changes found to be applied."
+CHANGES_FOUND = "Changes found to be applied."
+
+
+def get_subscription_payload():
+ payload = {
+ "Destination": "https://192.168.1.100:8188",
+ "EventFormatType": "MetricReport",
+ "Context": "RedfishEvent",
+ "Protocol": "Redfish",
+ "EventTypes": ["MetricReport"],
+ "SubscriptionType": "RedfishEvent"
+ }
+ return payload
+
+
+def get_subscription(obj, destination):
+ url = "{0}{1}".format(obj.root_uri, "EventService/Subscriptions")
+ list_resp = obj.invoke_request("GET", url)
+ list_subscriptions = list_resp.json_data["Members"]
+ for list_subscription in list_subscriptions:
+ id = os.path.basename(list_subscription.get('@odata.id'))
+ detail_json = get_subscription_details(obj, id)
+ subscription = get_subscription_payload()
+ if detail_json and detail_json["Destination"] == destination:
+ subscription["Id"] = detail_json["Id"]
+ subscription["Destination"] = detail_json["Destination"]
+ subscription["EventFormatType"] = detail_json["EventFormatType"]
+ subscription["Context"] = detail_json["Context"]
+ subscription["Protocol"] = detail_json["Protocol"]
+ subscription["EventTypes"] = detail_json["EventTypes"]
+ subscription["SubscriptionType"] = detail_json["SubscriptionType"]
+ return subscription
+ return None
+
+
+def get_subscription_details(obj, id):
+ detail_url = "{0}{1}".format(obj.root_uri, "EventService/Subscriptions/%s" % id)
+ detail_resp = obj.invoke_request("GET", detail_url)
+ detail_json = detail_resp.json_data
+ if detail_resp.success:
+ return detail_json
+ else:
+ return None
+
+
+def create_subscription(obj, module):
+ payload = get_subscription_payload()
+ payload["Destination"] = module.params["destination"]
+ payload["EventFormatType"] = module.params["event_format_type"]
+ payload["EventTypes"] = [module.params["event_type"]]
+ if module.check_mode:
+ module.exit_json(changed=True, msg=CHANGES_FOUND)
+ resp = obj.invoke_request("POST", "{0}{1}".format(obj.root_uri, "EventService/Subscriptions"), data=payload)
+ return resp
+
+
+def delete_subscription(obj, id):
+ resp = obj.invoke_request("DELETE", "{0}{1}".format(obj.root_uri, "EventService/Subscriptions/%s" % id))
+ return resp
+
+
+def _validate_inputs(module):
+ """validates that destination has https instead of http"""
+ inp_destination = module.params['destination']
+ if not inp_destination.startswith("https"):
+ module.fail_json(msg=DESTINATION_INVALID)
+
+
+def _get_formatted_payload(obj, existing_payload):
+ """get the payload after removing unwanted tags"""
+ existing_payload = obj.strip_substr_dict(existing_payload)
+ return existing_payload
+
+
+def main():
+ specs = {
+ "destination": {"required": True, "type": "str"},
+ "event_type": {"type": "str", "default": "Alert", "choices": ['Alert', 'MetricReport']},
+ "event_format_type": {"type": "str", "default": "Event",
+ "choices": ['Event', 'MetricReport']},
+ "state": {"type": "str", "default": "present", "choices": ['present', 'absent']},
+ }
+ specs.update(redfish_auth_params)
+
+ module = AnsibleModule(
+ argument_spec=specs,
+ supports_check_mode=True)
+
+ try:
+ _validate_inputs(module)
+ with Redfish(module.params, req_session=True) as obj:
+ subscription = get_subscription(obj, module.params["destination"])
+ if subscription:
+ if module.params["state"] == "present":
+ module.exit_json(msg=SUBSCRIPTION_EXISTS, changed=False)
+ else:
+ if module.check_mode:
+ module.exit_json(changed=True, msg=CHANGES_FOUND)
+ delete_resp = delete_subscription(obj, subscription["Id"])
+ if delete_resp.success:
+ module.exit_json(msg=SUBSCRIPTION_DELETED, changed=True)
+ else:
+ module.fail_json(msg=SUBSCRIPTION_UNABLE_DEL)
+ else:
+ if module.params["state"] == "present":
+ create_resp = create_subscription(obj, module)
+ if create_resp.success:
+ module.exit_json(msg=SUBSCRIPTION_ADDED, changed=True,
+ status=_get_formatted_payload(obj, create_resp.json_data))
+ else:
+ module.fail_json(msg=SUBSCRIPTION_UNABLE_ADD)
+ else:
+ module.exit_json(msg=DESTINATION_MISMATCH, changed=False)
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError,
+ ImportError, ValueError, TypeError, IOError, AssertionError, OSError, SSLError) as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware.py
new file mode 100644
index 000000000..a03ba0407
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.5.0
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: redfish_firmware
+short_description: To perform a component firmware update using the image file available on the local or remote system
+version_added: "2.1.0"
+description:
+ - This module allows the firmware update of only one component at a time.
+ If the module is run for more than one component, an error message is returned.
+ - Depending on the component, the firmware update is applied after an automatic or manual reboot.
+extends_documentation_fragment:
+ - dellemc.openmanage.redfish_auth_options
+options:
+ image_uri:
+ description:
+ - Firmware Image location URI or local path.
+ - For example- U(http://<web_address>/components.exe) or /home/firmware_repo/component.exe.
+ type: str
+ required: True
+ transfer_protocol:
+ description: Protocol used to transfer the firmware image file. Applicable for URI based update.
+ type: str
+ default: HTTP
+ choices: ["CIFS", "FTP", "HTTP", "HTTPS", "NSF", "OEM", "SCP", "SFTP", "TFTP"]
+requirements:
+ - "python >= 3.8.6"
+ - "urllib3"
+author:
+ - "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to Redfish APIs.
+ - This module does not support C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Update the firmware from a single executable file available in a HTTP protocol
+ dellemc.openmanage.redfish_firmware:
+ baseuri: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ image_uri: "http://192.168.0.2/firmware_repo/component.exe"
+ transfer_protocol: "HTTP"
+
+- name: Update the firmware from a single executable file available in a local path
+ dellemc.openmanage.redfish_firmware:
+ baseuri: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ image_uri: "/home/firmware_repo/component.exe"
+"""
+
+RETURN = """
+---
+msg:
+ description: Overall status of the firmware update task.
+ returned: always
+ type: str
+ sample: Successfully submitted the firmware update task.
+task:
+ description: Returns ID and URI of the created task.
+ returned: success
+ type: dict
+ sample: {
+ "id": "JID_XXXXXXXXXXXX",
+ "uri": "/redfish/v1/TaskService/Tasks/JID_XXXXXXXXXXXX"
+ }
+error_info:
+ type: dict
+ description: Details of http error.
+ returned: on http error
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to complete the operation because the JSON data format entered is invalid.",
+ "Resolution": "Do the following and the retry the operation:
+ 1) Enter the correct JSON data format and retry the operation.
+ 2) Make sure that no syntax error is present in JSON data format.
+ 3) Make sure that a duplicate key is not present in JSON data format.",
+ "Severity": "Critical"
+ },
+ {
+ "Message": "The request body submitted was malformed JSON and
+ could not be parsed by the receiving service.",
+ "Resolution": "Ensure that the request body is valid JSON and resubmit the request.",
+ "Severity": "Critical"
+ }
+ ],
+ "code": "Base.1.2.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information."
+ }
+ }
+"""
+
+
+import json
+import os
+from ssl import SSLError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+
+try:
+ from urllib3.fields import RequestField
+ from urllib3.filepost import encode_multipart_formdata
+ HAS_LIB = True
+except ImportError:
+ HAS_LIB = False
+
+UPDATE_SERVICE = "UpdateService"
+JOB_URI = "/redfish/v1/JobService/Jobs/{job_id}"
+
+
+def _encode_form_data(payload_file):
+ """Encode multipart/form-data for file upload."""
+ fields = []
+ f_name, f_data, f_type = payload_file.get("file")
+ f_binary = f_data.read()
+ req_field = RequestField(name="file", data=f_binary, filename=f_name)
+ req_field.make_multipart(content_type=f_type)
+ fields.append(req_field)
+ data, content_type = encode_multipart_formdata(fields)
+ return data, content_type
+
+
+def _get_update_service_target(obj, module):
+ """Returns all the URI which is required for firmware update dynamically."""
+ action_resp = obj.invoke_request("GET", "{0}{1}".format(obj.root_uri, UPDATE_SERVICE))
+ action_attr = action_resp.json_data["Actions"]
+ protocol = module.params["transfer_protocol"]
+ update_uri = None
+ push_uri = action_resp.json_data.get('HttpPushUri')
+ inventory_uri = action_resp.json_data.get('FirmwareInventory').get('@odata.id')
+ if "#UpdateService.SimpleUpdate" in action_attr:
+ update_service = action_attr.get("#UpdateService.SimpleUpdate")
+ proto = update_service.get("TransferProtocol@Redfish.AllowableValues")
+ if isinstance(proto, list) and protocol in proto and 'target' in update_service:
+ update_uri = update_service.get('target')
+ else:
+ module.fail_json(msg="Target firmware version does not support {0} protocol.".format(protocol))
+ if update_uri is None or push_uri is None or inventory_uri is None:
+ module.fail_json(msg="Target firmware version does not support redfish firmware update.")
+ return str(inventory_uri), str(push_uri), str(update_uri)
+
+
+def firmware_update(obj, module):
+ """Firmware update using single binary file from Local path or HTTP location."""
+ image_path = module.params.get("image_uri")
+ trans_proto = module.params["transfer_protocol"]
+ inventory_uri, push_uri, update_uri = _get_update_service_target(obj, module)
+ if image_path.startswith("http"):
+ payload = {"ImageURI": image_path, "TransferProtocol": trans_proto}
+ update_status = obj.invoke_request("POST", update_uri, data=payload)
+ else:
+ resp_inv = obj.invoke_request("GET", inventory_uri)
+ with open(os.path.join(image_path), "rb") as img_file:
+ binary_payload = {"file": (image_path.split(os.sep)[-1], img_file, "multipart/form-data")}
+ data, ctype = _encode_form_data(binary_payload)
+ headers = {"If-Match": resp_inv.headers.get("etag")}
+ headers.update({"Content-Type": ctype})
+ upload_status = obj.invoke_request("POST", push_uri, data=data, headers=headers, dump=False,
+ api_timeout=100)
+ if upload_status.status_code == 201:
+ payload = {"ImageURI": upload_status.headers.get("location")}
+ update_status = obj.invoke_request("POST", update_uri, data=payload)
+ else:
+ update_status = upload_status
+ return update_status
+
+
+def main():
+ specs = {
+ "image_uri": {"required": True, "type": "str"},
+ "transfer_protocol": {"type": "str", "default": "HTTP",
+ "choices": ["CIFS", "FTP", "HTTP", "HTTPS", "NSF", "OEM", "SCP", "SFTP", "TFTP"]},
+ }
+ specs.update(redfish_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ supports_check_mode=False)
+ if not HAS_LIB:
+ module.fail_json(msg=missing_required_lib("urllib3"))
+ try:
+ message = "Failed to submit the firmware update task."
+ with Redfish(module.params, req_session=True) as obj:
+ status = firmware_update(obj, module)
+ if status.success:
+ message = "Successfully submitted the firmware update task."
+ task_uri = status.headers.get("Location")
+ job_id = task_uri.split("/")[-1]
+ module.exit_json(msg=message, task={"id": job_id, "uri": JOB_URI.format(job_id=job_id)}, changed=True)
+ module.fail_json(msg=message, error_info=json.loads(status))
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except (RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError,
+ ImportError, ValueError, TypeError, IOError, AssertionError, OSError, SSLError) as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py
new file mode 100644
index 000000000..23094b158
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.0.1
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: redfish_powerstate
+short_description: Manage device power state
+version_added: "2.1.0"
+description:
+ - This module allows to manage the different power states of the specified device.
+extends_documentation_fragment:
+ - dellemc.openmanage.redfish_auth_options
+options:
+ resource_id:
+ description:
+ - The unique identifier of the device being managed.
+ For example- U(https://<I(baseuri)>/redfish/v1/Systems/<I(resource_id)>).
+ - This option is mandatory for I(base_uri) with multiple devices.
+ - To get the device details, use the API U(https://<I(baseuri)>/redfish/v1/Systems).
+ required: False
+ type: str
+ reset_type:
+ description:
+ - This option resets the device.
+ - If C(ForceOff), Turns off the device immediately.
+ - If C(ForceOn), Turns on the device immediately.
+ - If C(ForceRestart), Turns off the device immediately, and then restarts the device.
+ - If C(GracefulRestart), Performs graceful shutdown of the device, and then restarts the device.
+  - If C(GracefulShutdown), Performs a graceful shutdown of the device, and then turns off the device.
+ - If C(Nmi), Sends a diagnostic interrupt to the device. This is usually a non-maskable interrupt
+ (NMI) on x86 device.
+ - If C(On), Turns on the device.
+ - If C(PowerCycle), Performs power cycle on the device.
+ - If C(PushPowerButton), Simulates the pressing of a physical power button on the device.
+ - When a power control operation is performed, which is not supported on the device, an error message is displayed
+ with the list of operations that can be performed.
+ required: True
+ type: str
+ choices: ["ForceOff", "ForceOn", "ForceRestart", "GracefulRestart", "GracefulShutdown",
+ "Nmi", "On", "PowerCycle", "PushPowerButton"]
+requirements:
+ - "python >= 3.8.6"
+author:
+ - "Sajna Shetty(@Sajna-Shetty)"
+notes:
+ - Run this module from a system that has direct access to Redfish APIs.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Manage power state of the first device
+ dellemc.openmanage.redfish_powerstate:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_type: "On"
+
+- name: Manage power state of a specified device
+ dellemc.openmanage.redfish_powerstate:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_type: "ForceOff"
+ resource_id: "System.Embedded.1"
+'''
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of the reset operation.
+ returned: always
+ type: str
+ sample: "Successfully performed the reset type operation 'On'."
+error_info:
+ type: dict
+ description: Details of the HTTP error.
+ returned: on http error
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to complete the operation because the resource
+ /redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset entered in not found.",
+ "MessageArgs": [
+ "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset"
+ ],
+ "MessageArgs@odata.count": 1,
+ "MessageId": "IDRAC.2.1.SYS403",
+ "RelatedProperties": [],
+ "RelatedProperties@odata.count": 0,
+ "Resolution": "Enter the correct resource and retry the operation.
+ For information about valid resource,
+ see the Redfish Users Guide available on the support site.",
+ "Severity": "Critical"
+ },
+ ],
+ "code": "Base.1.5.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information"
+ }
+}
+'''
+
+import json
+import re
+from ssl import SSLError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+powerstate_map = {}
+
+
def fetch_power_uri_resource(module, session_obj):
    """Resolve the ComputerSystem.Reset action details for the target system.

    Populates the module-level ``powerstate_map`` with:
      * ``power_uri``       -- target URI of the ``#ComputerSystem.Reset`` action
      * ``allowable_enums`` -- reset types the device advertises as supported
      * ``current_state``   -- the system's current ``PowerState``

    Fails the module when the Systems collection is empty, when multiple
    systems exist but ``resource_id`` was not given, when ``resource_id`` does
    not match any member, or when the system exposes no Actions.
    """
    try:
        resource_id = module.params.get("resource_id")
        static_resource_id_resource = None
        if resource_id:
            # Absolute URI of the user-selected Systems member.
            static_resource_id_resource = "{0}{1}{2}".format(session_obj.root_uri, "Systems/", resource_id)
        # NOTE: defined before any request so the HTTPError handler below can
        # reference it safely.
        error_message1 = "The target device does not support the system reset feature" \
                         " using Redfish API."
        system_uri = "{0}{1}".format(session_obj.root_uri, "Systems")
        system_resp = session_obj.invoke_request("GET", system_uri)
        system_members = system_resp.json_data.get("Members")
        if len(system_members) > 1 and static_resource_id_resource is None:
            module.fail_json(msg="Multiple devices exists in the system, but option 'resource_id' is not specified.")
        if system_members:
            resource_id_list = [system_id["@odata.id"] for system_id in system_members if "@odata.id" in system_id]
            # Default to the only/first member when resource_id was not given.
            system_id_res = static_resource_id_resource or resource_id_list[0]
            if system_id_res in resource_id_list:
                system_id_res_resp = session_obj.invoke_request("GET", system_id_res)
                system_id_res_data = system_id_res_resp.json_data
                action_id_res = system_id_res_data.get("Actions")
                if action_id_res:
                    current_state = system_id_res_data["PowerState"]
                    power_uri = action_id_res['#ComputerSystem.Reset']['target']
                    allowable_enums = action_id_res['#ComputerSystem.Reset']['ResetType@Redfish.AllowableValues']
                    powerstate_map.update(
                        {'power_uri': power_uri, 'allowable_enums': allowable_enums, 'current_state': current_state})
                else:
                    module.fail_json(msg=error_message1)
            else:
                error_message2 = "Invalid device Id '{0}' is provided".format(resource_id)
                module.fail_json(msg=error_message2)
        else:
            module.fail_json(msg=error_message1)
    except HTTPError as err:
        # 404/405 mean the reset endpoints themselves are absent on the device.
        if err.code in [404, 405]:
            module.fail_json(msg=error_message1,
                             error_info=json.load(err))
        raise err
+
+
def is_change_applicable_for_power_state(current_power_state, apply_power_state):
    """Decide whether applying *apply_power_state* would change the device.

    :param current_power_state: power state currently reported by the device
    :type current_power_state: str
    :param apply_power_state: requested reset type
    :type apply_power_state: str
    :return: True when the reset would have an effect, False otherwise
    :rtype: bool
    """
    on_states = ["On", "PoweringOn"]
    off_states = ["Off", "PoweringOff"]

    # Maps each group of reset types to the current states they act upon.
    reset_map_apply = {
        ("On", "ForceOn",): off_states,
        ("PushPowerButton",): on_states + off_states,
        ("ForceOff", "ForceRestart", "GracefulRestart", "GracefulShutdown", "Nmi", "PowerCycle",): on_states
    }
    for reset_group, valid_source_states in reset_map_apply.items():
        if apply_power_state in reset_group:
            # First matching group decides; unknown current states yield False.
            return current_power_state in valid_source_states
    return False
+
+
def is_valid_reset_type(reset_type, allowable_enum, module):
    """Fail the module when *reset_type* is not advertised by the device.

    :param reset_type: requested reset type, e.g. ``GracefulRestart``
    :param allowable_enum: reset types the device reports as allowable
    :param module: AnsibleModule used to report the failure
    :return: None when the reset type is valid
    """
    if reset_type not in allowable_enum:
        # Split the CamelCase reset type into lowercase words for the message,
        # e.g. "GracefulRestart" -> "graceful restart".
        res_list = re.findall('[A-Z][^A-Z]*', reset_type)
        lw_reset_type = " ".join([word.lower() for word in res_list])
        # FIX: added the missing space between the two sentences of the message.
        error_msg = "The target device does not support a" \
                    " {0} operation. The acceptable values for device reset types" \
                    " are {1}.".format(lw_reset_type, ", ".join(allowable_enum))
        module.fail_json(msg=error_msg)
+
+
def run_change_power_state(redfish_session_obj, module):
    """Apply the requested reset type to the system.

    Resolves the reset action, validates the requested type against the
    device's allowable values, honours check mode, and issues the reset only
    when it would actually change the power state.

    :param redfish_session_obj: Redfish session handle
    :param module: AnsibleModule object
    """
    apply_reset_type = module.params["reset_type"]
    fetch_power_uri_resource(module, redfish_session_obj)
    is_valid_reset_type(apply_reset_type, powerstate_map["allowable_enums"], module)
    current_power_state = powerstate_map["current_state"]
    reset_flag = is_change_applicable_for_power_state(current_power_state, apply_reset_type)
    if module.check_mode is True:
        if reset_flag is True:
            module.exit_json(msg="Changes found to be applied.", changed=True)
        else:
            module.exit_json(msg="No Changes found to be applied.", changed=False)

    if reset_flag is True:
        payload = {"ResetType": apply_reset_type}
        power_uri = powerstate_map["power_uri"]
        reset_resp = redfish_session_obj.invoke_request("POST", power_uri, data=payload)
        if reset_resp.success:
            module.exit_json(msg="Successfully performed the reset type operation"
                                 " '{0}'.".format(apply_reset_type), changed=True)
        else:
            # FIX: an unsuccessful reset must fail the task; previously it was
            # reported via exit_json (task "ok") with changed=False, masking
            # the failure from playbooks.
            module.fail_json(msg="Unable to perform the reset type operation '{0}'.".format(apply_reset_type))
    else:
        # Requested state already in effect; report idempotent no-op.
        module.exit_json(msg="The device is already powered {0}.".format(current_power_state.lower()), changed=False)
+
+
def main():
    """Module entry point: parse arguments and drive the power-state change."""
    arguments = {
        "resource_id": {"required": False, "type": "str"},
        "reset_type": {
            "required": True, "type": "str",
            "choices": ['ForceOff', 'ForceOn', 'ForceRestart', 'GracefulRestart',
                        'GracefulShutdown', 'Nmi', 'On', 'PowerCycle', 'PushPowerButton'],
        },
    }
    # Auth options (baseuri/username/password/ca_path/...) come from the
    # shared fragment and take precedence on key collision.
    arguments.update(redfish_auth_params)

    module = AnsibleModule(argument_spec=arguments, supports_check_mode=True)
    try:
        with Redfish(module.params) as redfish_obj:
            run_change_power_state(redfish_obj, module)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except URLError as err:
        # Network-unreachable target is reported as unreachable, not failed.
        module.exit_json(msg=str(err), unreachable=True)
    except (IOError, ValueError, SSLError, TypeError, ConnectionError, OSError) as err:
        module.fail_json(msg=str(err))
    except Exception as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py
new file mode 100644
index 000000000..ce02b4c00
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py
@@ -0,0 +1,633 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell EMC OpenManage Ansible Modules
+# Version 5.3.0
+# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: redfish_storage_volume
+short_description: Manages the storage volume configuration
+version_added: "2.1.0"
+description:
+ - This module allows to create, modify, initialize, or delete a single storage volume.
+extends_documentation_fragment:
+ - dellemc.openmanage.redfish_auth_options
+options:
+ controller_id:
+ description:
+ - Fully Qualified Device Descriptor (FQDD) of the storage controller.
+ - For example- RAID.Slot.1-1.
+ - This option is mandatory when I(state) is C(present) while creating a volume.
+ type: str
+ volume_id:
+ description:
+ - FQDD of existing volume.
+ - For example- Disk.Virtual.4:RAID.Slot.1-1.
+ - This option is mandatory in the following scenarios,
+ - >-
+ I(state) is C(present), when updating a volume.
+ - >-
+ I(state) is C(absent), when deleting a volume.
+ - >-
+ I(command) is C(initialize), when initializing a volume.
+ type: str
+ state:
+ description:
+ - >-
+      C(present) creates a storage volume for the specified I(controller_id), or modifies the storage volume for the
+      specified I(volume_id).
+ "Note: Modification of an existing volume properties depends on drive and controller capabilities".
+ - C(absent) deletes the volume for the specified I(volume_id).
+ type: str
+ choices: [present, absent]
+ command:
+ description:
+ - C(initialize) initializes an existing storage volume for a specified I(volume_id).
+ type: str
+ choices: [initialize]
+ volume_type:
+ description:
+ - One of the following volume types must be selected to create a volume.
+ - >-
+ C(Mirrored) The volume is a mirrored device.
+ - >-
+ C(NonRedundant) The volume is a non-redundant storage device.
+ - >-
+ C(SpannedMirrors) The volume is a spanned set of mirrored devices.
+ - >-
+ C(SpannedStripesWithParity) The volume is a spanned set of devices which uses parity to retain redundant
+ information.
+ - >-
+ C(StripedWithParity) The volume is a device which uses parity to retain redundant information.
+ type: str
+ choices: [NonRedundant, Mirrored, StripedWithParity, SpannedMirrors, SpannedStripesWithParity]
+ name:
+ description:
+ - Name of the volume to be created.
+ - Only applicable when I(state) is C(present).
+ type: str
+ drives:
+ description:
+ - FQDD of the Physical disks.
+ - For example- Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1.
+ - Only applicable when I(state) is C(present) when creating a new volume.
+ type: list
+ elements: str
+ block_size_bytes:
+ description:
+      - Block size in bytes. Only applicable when I(state) is C(present).
+ type: int
+ capacity_bytes:
+ description:
+ - Volume size in bytes.
+ - Only applicable when I(state) is C(present).
+ type: str
+ optimum_io_size_bytes:
+ description:
+ - Stripe size value must be in multiples of 64 * 1024.
+ - Only applicable when I(state) is C(present).
+ type: int
+ encryption_types:
+ description:
+ - The following encryption types can be selected.
+ - C(ControllerAssisted) The volume is encrypted by the storage controller entity.
+ - C(NativeDriveEncryption) The volume utilizes the native drive encryption capabilities
+ of the drive hardware.
+ - C(SoftwareAssisted) The volume is encrypted by the software running
+ on the system or the operating system.
+ - Only applicable when I(state) is C(present).
+ type: str
+ choices: [NativeDriveEncryption, ControllerAssisted, SoftwareAssisted]
+ encrypted:
+ description:
+ - Indicates whether volume is currently utilizing encryption or not.
+ - Only applicable when I(state) is C(present).
+ type: bool
+ oem:
+ description:
+ - Includes OEM extended payloads.
+      - Only applicable when I(state) is C(present).
+ type: dict
+ initialize_type:
+ description:
+ - Initialization type of existing volume.
+ - Only applicable when I(command) is C(initialize).
+ type: str
+ choices: [Fast, Slow]
+ default: Fast
+
+requirements:
+ - "python >= 3.8.6"
+author: "Sajna Shetty(@Sajna-Shetty)"
+notes:
+ - Run this module from a system that has direct access to Redfish APIs.
+ - This module supports C(check_mode).
+ - This module always reports changes when I(name) and I(volume_id) are not specified.
+ Either I(name) or I(volume_id) is required to support C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Create a volume with supported options
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "present"
+ volume_type: "Mirrored"
+ name: "VD0"
+ controller_id: "RAID.Slot.1-1"
+ drives:
+ - Disk.Bay.5:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.6:Enclosure.Internal.0-1:RAID.Slot.1-1
+ block_size_bytes: 512
+ capacity_bytes: 299439751168
+ optimum_io_size_bytes: 65536
+ encryption_types: NativeDriveEncryption
+ encrypted: true
+
+- name: Create a volume with minimum options
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ volume_type: "NonRedundant"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+
+- name: Modify a volume's encryption type settings
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "present"
+ volume_id: "Disk.Virtual.5:RAID.Slot.1-1"
+ encryption_types: "ControllerAssisted"
+ encrypted: true
+
+- name: Delete an existing volume
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "absent"
+ volume_id: "Disk.Virtual.5:RAID.Slot.1-1"
+
+- name: Initialize an existing volume
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "initialize"
+ volume_id: "Disk.Virtual.6:RAID.Slot.1-1"
+ initialize_type: "Slow"
+'''
+
+RETURN = r'''
+---
+msg:
+ description: Overall status of the storage configuration operation.
+ returned: always
+ type: str
+ sample: "Successfully submitted create volume task."
+task:
+ type: dict
+ description: Returns ID and URI of the created task.
+ returned: success
+ sample: {
+ "id": "JID_XXXXXXXXXXXXX",
+ "uri": "/redfish/v1/TaskService/Tasks/JID_XXXXXXXXXXXXX"
+ }
+error_info:
+ type: dict
+ description: Details of a http error.
+ returned: on http error
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "Unable to perform configuration operations because a
+ configuration job for the device already exists.",
+ "MessageArgs": [],
+ "MessageArgs@odata.count": 0,
+ "MessageId": "IDRAC.1.6.STOR023",
+ "RelatedProperties": [],
+ "RelatedProperties@odata.count": 0,
+ "Resolution": "Wait for the current job for the device to complete
+ or cancel the current job before attempting more configuration
+ operations on the device.",
+ "Severity": "Informational"
+ }
+ ],
+ "code": "Base.1.2.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information"
+ }
+ }
+'''
+
+import json
+import copy
+from ssl import SSLError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+
# URI templates; "{storage_base_uri}" is resolved at runtime by
# fetch_storage_resource() from the system's Storage collection link.
VOLUME_INITIALIZE_URI = "{storage_base_uri}/Volumes/{volume_id}/Actions/Volume.Initialize"
DRIVES_URI = "{storage_base_uri}/Drives/{driver_id}"
CONTROLLER_URI = "{storage_base_uri}/{controller_id}"
SETTING_VOLUME_ID_URI = "{storage_base_uri}/Volumes/{volume_id}/Settings"
CONTROLLER_VOLUME_URI = "{storage_base_uri}/{controller_id}/Volumes"
VOLUME_ID_URI = "{storage_base_uri}/Volumes/{volume_id}"
# Populated once per run with {"storage_base_uri": <uri>}.
storage_collection_map = {}
# Standard check-mode messages shared by the operations below.
CHANGES_FOUND = "Changes found to be applied."
NO_CHANGES_FOUND = "No changes found to be applied."
+
+
def fetch_storage_resource(module, session_obj):
    """Locate the first system's Storage collection and cache its base URI.

    On success stores {"storage_base_uri": <uri>} in the module-level
    ``storage_collection_map``; fails the module when the target does not
    expose the storage feature.
    """
    # FIX: the same message literal was duplicated three times; hoisted once.
    unsupported_msg = "Target out-of-band controller does not support storage feature using Redfish API."
    try:
        system_uri = "{0}{1}".format(session_obj.root_uri, "Systems")
        system_resp = session_obj.invoke_request("GET", system_uri)
        system_members = system_resp.json_data.get("Members")
        if system_members:
            # Only the first Systems member is considered by this module.
            system_id_res = system_members[0]["@odata.id"]
            system_id_res_resp = session_obj.invoke_request("GET", system_id_res)
            system_id_res_data = system_id_res_resp.json_data.get("Storage")
            if system_id_res_data:
                storage_collection_map.update({"storage_base_uri": system_id_res_data["@odata.id"]})
            else:
                module.fail_json(msg=unsupported_msg)
        else:
            module.fail_json(msg=unsupported_msg)
    except HTTPError as err:
        # 404/405 mean the storage endpoints themselves are absent.
        if err.code in [404, 405]:
            module.fail_json(msg=unsupported_msg, error_info=json.load(err))
        raise err
    # FIX: removed a no-op "except (...) as err: raise err" clause that only
    # re-raised what it caught; those exceptions now propagate unchanged.
+
+
def volume_payload(module):
    """Build the Redfish create/modify request body from module parameters,
    omitting every option the user did not supply.

    :param module: AnsibleModule whose ``params`` hold the volume options
    :return: dict payload ready for invoke_request
    """
    params = module.params
    capacity = params.get("capacity_bytes")
    if capacity:
        # The option is declared as str; Redfish expects an integer.
        capacity = int(capacity)

    disk_refs = []
    if params.get("drives"):
        base_uri = storage_collection_map["storage_base_uri"]
        for drive_id in params["drives"]:
            disk_refs.append({"@odata.id": DRIVES_URI.format(storage_base_uri=base_uri,
                                                             driver_id=drive_id)})

    candidate = {
        "Name": params.get("name"),
        "VolumeType": params.get("volume_type"),
        "BlockSizeBytes": params.get("block_size_bytes"),
        "CapacityBytes": capacity,
        "OptimumIOSizeBytes": params.get("optimum_io_size_bytes"),
        "Drives": disk_refs,
    }
    # Drop unset (falsy) options so the device only sees what was requested.
    payload = {key: value for key, value in candidate.items() if value}
    if params.get("oem"):
        payload.update(params.get("oem"))
    if params.get("encrypted") is not None:
        payload.update({"Encrypted": params.get("encrypted")})
    if params.get("encryption_types"):
        payload.update({"EncryptionTypes": [params.get("encryption_types")]})

    return payload
+
+
def check_physical_disk_exists(module, drives):
    """Validate that every drive requested via the ``drives`` option is
    attached to the specified controller; fail the module otherwise.

    :param module: AnsibleModule carrying the requested drives/controller_id
    :param drives: list of drive references from the controller resource
    :return: True when validation passes (or no drives were requested)
    """
    requested = module.params.get("drives")
    if requested:
        controller_id = module.params.get("controller_id")
        if not drives:
            module.fail_json(msg="No Drive(s) are attached to the specified "
                                 "Controller Id: {0}.".format(controller_id))
        # The drive FQDD is the last path segment of each @odata.id reference.
        attached = [drive['@odata.id'].split("/")[-1] for drive in drives]
        missing = list(set(requested) - set(attached))
        if missing:
            module.fail_json(msg="Following Drive(s) {0} are not attached to the "
                                 "specified Controller Id: {1}.".format(",".join(missing), controller_id))
    return True
+
+
def check_specified_identifier_exists_in_the_system(module, session_obj, uri, err_message):
    """GET *uri* and return the response, failing the module on 404.

    In check mode a 404 returns the HTTPError itself so callers can report
    "no changes"; every other error propagates to the caller.
    """
    try:
        return session_obj.invoke_request('GET', uri)
    except HTTPError as err:
        if err.code == 404:
            if module.check_mode:
                # Callers inspect .code to decide NO_CHANGES_FOUND.
                return err
            module.fail_json(msg=err_message)
        raise err
    except (URLError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
        raise err
+
+
def check_controller_id_exists(module, session_obj):
    """Verify the requested controller exists, then validate its drives.

    :return: True when the controller and all requested drives are present
    """
    controller_id = module.params.get("controller_id")
    controller_uri = CONTROLLER_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"],
                                           controller_id=controller_id)
    err_message = "Specified Controller {0} does " \
                  "not exist in the System.".format(controller_id)
    resp = check_specified_identifier_exists_in_the_system(module, session_obj, controller_uri, err_message)
    if not resp.success:
        module.fail_json(msg="Failed to retrieve the details of the specified Controller Id "
                             "{0}.".format(controller_id))
    return check_physical_disk_exists(module, resp.json_data["Drives"])
+
+
def check_volume_id_exists(module, session_obj, volume_id):
    """Return the GET response for *volume_id*, failing the module when the
    volume does not exist (used by the modify/delete/initialize flows)."""
    volume_uri = VOLUME_ID_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"],
                                      volume_id=volume_id)
    not_found_msg = "Specified Volume Id {0} does not exist in the System.".format(volume_id)
    return check_specified_identifier_exists_in_the_system(module, session_obj, volume_uri, not_found_msg)
+
+
def check_initialization_progress(module, session_obj, volume_id):
    """Return the operations currently running on *volume_id*.

    :return: the volume's "Operations" list; empty when the lookup response
             was not successful
    """
    resp = check_volume_id_exists(module, session_obj, volume_id)
    if resp.success:
        return resp.json_data["Operations"]
    return []
+
+
def perform_storage_volume_action(method, uri, session_obj, action, payload=None):
    """Issue the RAID request (create/modify/delete/initialize) and return
    the standard success message built from the task URI.

    :param method: HTTP method, e.g. "POST"/"PATCH"/"DELETE"
    :param uri: target Redfish URI
    :param session_obj: Redfish session handle
    :param action: label used in the success message
    :param payload: optional request body
    :raises: HTTPError/URLError/... propagate unchanged from invoke_request
    """
    # FIX: removed an exception handler that only re-raised everything it
    # caught; errors now propagate to the caller unchanged.
    resp = session_obj.invoke_request(method, uri, data=payload)
    task_uri = resp.headers["Location"]
    return get_success_message(action, task_uri)
+
+
def check_mode_validation(module, session_obj, action, uri):
    """Check-mode / idempotency gate for the create and modify operations.

    Compares the requested volume settings against an existing volume (found
    by name for "create", by ``volume_id`` for "modify") and exits early with
    CHANGES_FOUND / NO_CHANGES_FOUND where appropriate.  Returns None when
    the operation should proceed.
    """
    volume_id = module.params.get('volume_id')
    name = module.params.get("name")
    block_size_bytes = module.params.get("block_size_bytes")
    capacity_bytes = module.params.get("capacity_bytes")
    optimum_io_size_bytes = module.params.get("optimum_io_size_bytes")
    encryption_types = module.params.get("encryption_types")
    encrypted = module.params.get("encrypted")
    volume_type = module.params.get("volume_type")
    drives = module.params.get("drives")
    # With neither name nor volume_id there is nothing to compare, so check
    # mode always reports a change (documented in the module notes).
    if name is None and volume_id is None and module.check_mode:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    if action == "create" and name is not None:
        # Look for an existing volume with the same name on this controller.
        volume_resp = session_obj.invoke_request("GET", uri)
        volume_resp_data = volume_resp.json_data
        if volume_resp_data.get("Members@odata.count") == 0 and module.check_mode:
            module.exit_json(msg=CHANGES_FOUND, changed=True)
        elif 0 < volume_resp_data.get("Members@odata.count"):
            for mem in volume_resp_data.get("Members"):
                mem_resp = session_obj.invoke_request("GET", mem["@odata.id"])
                if mem_resp.json_data["Name"] == name:
                    volume_id = mem_resp.json_data["Id"]
                    break
        # No volume with this name exists yet: creating it is a change.
        if name is not None and module.check_mode and volume_id is None:
            module.exit_json(msg=CHANGES_FOUND, changed=True)
    if volume_id is not None:
        # Fetch the pending ("Settings") view of the existing volume and diff
        # it against the requested options.
        resp = session_obj.invoke_request("GET", SETTING_VOLUME_ID_URI.format(
            storage_base_uri=storage_collection_map["storage_base_uri"],
            volume_id=volume_id))
        resp_data = resp.json_data
        # NOTE(review): assumes EncryptionTypes is a non-empty list -- confirm
        # against the device schema.
        exist_value = {"Name": resp_data["Name"], "BlockSizeBytes": resp_data["BlockSizeBytes"],
                       "CapacityBytes": resp_data["CapacityBytes"], "Encrypted": resp_data["Encrypted"],
                       "EncryptionTypes": resp_data["EncryptionTypes"][0],
                       "OptimumIOSizeBytes": resp_data["OptimumIOSizeBytes"], "VolumeType": resp_data["VolumeType"]}
        exit_value_filter = dict([(k, v) for k, v in exist_value.items() if v is not None])
        cp_exist_value = copy.deepcopy(exit_value_filter)
        req_value = {"Name": name, "BlockSizeBytes": block_size_bytes,
                     "Encrypted": encrypted, "OptimumIOSizeBytes": optimum_io_size_bytes,
                     "VolumeType": volume_type, "EncryptionTypes": encryption_types}
        if capacity_bytes is not None:
            req_value["CapacityBytes"] = int(capacity_bytes)
        req_value_filter = dict([(k, v) for k, v in req_value.items() if v is not None])
        # Overlay the requested values on a copy of the existing ones; any
        # symmetric difference with the original means something would change.
        cp_exist_value.update(req_value_filter)
        exist_drive, req_drive = [], []
        if resp_data["Links"]:
            exist_drive = [disk["@odata.id"].split("/")[-1] for disk in resp_data["Links"]["Drives"]]
        if drives is not None:
            req_drive = sorted(drives)
        diff_changes = [bool(set(exit_value_filter.items()) ^ set(cp_exist_value.items())) or
                        bool(set(exist_drive) ^ set(req_drive))]
        if module.check_mode and any(diff_changes) is True:
            module.exit_json(msg=CHANGES_FOUND, changed=True)
        elif (module.check_mode and any(diff_changes) is False) or \
                (not module.check_mode and any(diff_changes) is False):
            module.exit_json(msg=NO_CHANGES_FOUND)
    return None
+
+
def perform_volume_create_modify(module, session_obj):
    """Create a volume (when ``controller_id`` is given) or modify an
    existing one (when only ``volume_id`` is given) for state "present".

    :return: status-message dict from perform_storage_volume_action
    """
    specified_controller_id = module.params.get("controller_id")
    volume_id = module.params.get("volume_id")
    if specified_controller_id is not None:
        # Create: validate the controller (and requested drives) first.
        check_controller_id_exists(module, session_obj)
        uri = CONTROLLER_VOLUME_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"],
                                           controller_id=specified_controller_id)
        method = "POST"
        action = "create"
    else:
        # Modify: the volume must already exist.
        resp = check_volume_id_exists(module, session_obj, volume_id)
        # NOTE(review): when resp.success is False, uri/method/action stay
        # unbound; this relies on check_volume_id_exists having already
        # failed the module (or raised) in that case -- confirm.
        if resp.success:
            uri = SETTING_VOLUME_ID_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"],
                                               volume_id=volume_id)
            method = "PATCH"
            action = "modify"
    payload = volume_payload(module)
    # May exit early with CHANGES_FOUND / NO_CHANGES_FOUND in check mode.
    check_mode_validation(module, session_obj, action, uri)
    if not payload:
        module.fail_json(msg="Input options are not provided for the {0} volume task.".format(action))
    return perform_storage_volume_action(method, uri, session_obj, action, payload)
+
+
def perform_volume_deletion(module, session_obj):
    """Delete the volume identified by ``volume_id`` (state: absent).

    Honours check mode: reports CHANGES_FOUND when the volume exists and
    NO_CHANGES_FOUND when it is already gone (404).
    """
    volume_id = module.params.get("volume_id")
    if not volume_id:
        module.fail_json(msg="'volume_id' option is a required property for deleting a volume.")
    resp = check_volume_id_exists(module, session_obj, volume_id)
    volume_exists = hasattr(resp, "success") and resp.success
    if volume_exists and not module.check_mode:
        delete_uri = VOLUME_ID_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"],
                                          volume_id=volume_id)
        return perform_storage_volume_action("DELETE", delete_uri, session_obj, "delete")
    if volume_exists and module.check_mode:
        module.exit_json(msg=CHANGES_FOUND, changed=True)
    if hasattr(resp, "code") and resp.code == 404 and module.check_mode:
        # Already absent: nothing to do in check mode.
        module.exit_json(msg=NO_CHANGES_FOUND)
+
+
def perform_volume_initialization(module, session_obj):
    """Start initialization of an existing volume (command: initialize).

    Fails when a configuration job is already running on the volume.
    """
    volume_id = module.params.get("volume_id")
    if not volume_id:
        module.fail_json(msg="'volume_id' option is a required property for initializing a volume.")
    operations = check_initialization_progress(module, session_obj, volume_id)
    if operations:
        # A configuration job is already active on this volume.
        message = "Cannot perform the configuration operations because a " \
                  "configuration job for the device already exists."
        running_op = operations[0].get("OperationName")
        running_pct = operations[0].get("PercentageComplete")
        if running_op and running_pct:
            message = "Cannot perform the configuration operation because the configuration job '{0}'" \
                      " in progress is at '{1}' percentage.".format(running_op, running_pct)
        module.fail_json(msg=message)
    init_uri = VOLUME_INITIALIZE_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"],
                                            volume_id=volume_id)
    payload = {"InitializeType": module.params["initialize_type"]}
    return perform_storage_volume_action("POST", init_uri, session_obj, "initialize", payload)
+
+
def configure_raid_operation(module, session_obj):
    """Dispatch to create/modify, delete, or initialize based on the
    ``state``/``command`` options and return the resulting status dict."""
    state = module.params.get("state")
    command = module.params.get("command")
    if state == "present":
        return perform_volume_create_modify(module, session_obj)
    if state == "absent":
        return perform_volume_deletion(module, session_obj)
    if command == "initialize":
        return perform_volume_initialization(module, session_obj)
+
+
def get_success_message(action, task_uri):
    """Build the status dict for a submitted RAID task.

    :param action: one of "create", "modify", "delete", "initialize"
    :param task_uri: task-monitor URI from the Location header, or None
    :return: dict with "msg" plus "task_uri"/"task_id" when a URI is known
    """
    status_message = {"msg": "Successfully submitted {0} volume task.".format(action)}
    if task_uri is not None:
        # The task id is the last path segment of the task URI.
        status_message["task_uri"] = task_uri
        status_message["task_id"] = task_uri.split("/")[-1]
    return status_message
+
+
def validate_inputs(module):
    """Reject option combinations that cannot be acted upon.

    One of ``state``/``command`` is required; for ``state=present`` one of
    ``controller_id``/``volume_id`` must also be supplied.
    """
    params = module.params
    state = params.get("state")
    if state is None and params.get("command") is None:
        module.fail_json(msg="Either state or command should be provided to further actions.")
    elif state == "present" and \
            params.get("controller_id") is None and \
            params.get("volume_id") is None:
        module.fail_json(msg="When state is present, either controller_id or"
                             " volume_id must be specified to perform further actions.")
+
+
def main():
    """Module entry point: build the argument spec, open a Redfish session,
    and run the requested storage-volume operation."""
    argument_spec = {
        "state": {"type": "str", "required": False, "choices": ['present', 'absent']},
        "command": {"type": "str", "required": False, "choices": ['initialize']},
        "volume_type": {"type": "str", "required": False,
                        "choices": ['NonRedundant', 'Mirrored',
                                    'StripedWithParity', 'SpannedMirrors',
                                    'SpannedStripesWithParity']},
        "name": {"required": False, "type": "str"},
        "controller_id": {"required": False, "type": "str"},
        "drives": {"elements": "str", "required": False, "type": "list"},
        "block_size_bytes": {"required": False, "type": "int"},
        "capacity_bytes": {"required": False, "type": "str"},
        "optimum_io_size_bytes": {"required": False, "type": "int"},
        "encryption_types": {"type": "str", "required": False,
                             "choices": ['NativeDriveEncryption', 'ControllerAssisted', 'SoftwareAssisted']},
        "encrypted": {"required": False, "type": "bool"},
        "volume_id": {"required": False, "type": "str"},
        "oem": {"required": False, "type": "dict"},
        "initialize_type": {"type": "str", "required": False, "choices": ['Fast', 'Slow'], "default": "Fast"},
    }
    # Shared auth options (baseuri/username/password/ca_path/...).
    argument_spec.update(redfish_auth_params)

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['state', 'command']],
        required_one_of=[['state', 'command']],
        required_if=[['command', 'initialize', ['volume_id']],
                     ['state', 'absent', ['volume_id']]],
        supports_check_mode=True)

    try:
        validate_inputs(module)
        with Redfish(module.params, req_session=True) as session_obj:
            fetch_storage_resource(module, session_obj)
            status_message = configure_raid_operation(module, session_obj)
            task_status = {"uri": status_message.get("task_uri"),
                           "id": status_message.get("task_id")}
            module.exit_json(msg=status_message["msg"], task=task_status, changed=True)
    except HTTPError as err:
        module.fail_json(msg=str(err), error_info=json.load(err))
    except (URLError, SSLValidationError, ConnectionError, ImportError, ValueError,
            RuntimeError, TypeError, OSError, SSLError) as err:
        module.fail_json(msg=str(err))


if __name__ == '__main__':
    main()